Kolibri OS Subversion repository — radeon ring/IB code (radeon_ring.c), Rev 2997 (diff base: Rev 2005)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian Konig
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int radeon_debugfs_sa_init(struct radeon_device *rdev);

/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: requesting vm, if any
 * @size: requested IB size
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int i, r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	r = radeon_semaphore_create(rdev, &ib->semaphore);
	if (r) {
		return r;
	}

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		ib->sync_to[i] = NULL;

	return 0;
}
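
/*
 * Example (editor's sketch, not part of the original file): the typical
 * round trip through the three IB entry points in this file, roughly as
 * the command-submission path drives them.  Guarded out with #if 0.
 */
#if 0
static int example_submit_ib(struct radeon_device *rdev)
{
	struct radeon_ib ib;
	int r;

	/* carve a small IB out of the suballocator pool */
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 64);
	if (r)
		return r;

	ib.ptr[0] = 0x80000000;	/* type-2 (NOP-style) filler packet */
	ib.length_dw = 1;

	/* puts the IB on the GFX ring and emits a fence for it */
	r = radeon_ib_schedule(rdev, &ib, NULL);

	/* safe right away: the fence keeps the sa_bo busy until the GPU is done */
	radeon_ib_free(rdev, &ib);
	return r;
}
#endif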

/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}

/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	bool need_sync = false;
	int i, r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the IB to report. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = ib->sync_to[i];
		if (radeon_fence_need_sync(fence, ib->ring)) {
			need_sync = true;
			radeon_semaphore_sync_rings(rdev, ib->semaphore,
						    fence->ring, ib->ring);
			radeon_fence_note_sync(fence, ib->ring);
		}
	}
	/* immediately free semaphore when we don't need to sync */
	if (!need_sync) {
		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
	}
	/* if we can't remember our last VM flush then flush now! */
	if (ib->vm && !ib->vm->last_flush) {
		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
	}
	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}
	/* we just flushed the VM, remember that */
	if (ib->vm && !ib->vm->last_flush) {
		ib->vm->last_flush = radeon_fence_ref(ib->fence);
	}
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
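
/*
 * Editor's note: the "64 + RADEON_NUM_RINGS * 8" budget above reserves
 * 64 dwords for the IB execute, VM flush and fence packets, plus a few
 * dwords per ring for the semaphore waits that
 * radeon_semaphore_sync_rings() may emit for the fences in sync_to[].
 */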

/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}
	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
	if (r) {
		return r;
	}

	rdev->ib_pool_ready = true;
	if (radeon_debugfs_sa_init(rdev)) {
		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}

/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}
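
/*
 * Editor's note: with RADEON_IB_POOL_SIZE == 16 the suballocator above
 * manages a single 16 * 64KiB = 1MiB GTT buffer, out of which
 * radeon_ib_get() hands out 256-byte aligned slices.
 */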

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

/**
 * radeon_ring_write - write a value to the ring
 *
 * @ring: radeon_ring structure holding ring information
 * @v: dword (dw) value to write
 *
 * Write a value to the requested ring buffer (all asics).
 */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
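
/*
 * Editor's note: wptr is masked rather than bounds-checked, so writes
 * wrap in place.  With ptr_mask == 0xfff, for example, a write at
 * wptr == 0xfff leaves wptr == 0 (0x1000 & 0xfff) and the next dword
 * lands back at the start of the ring.
 */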

/**
 * radeon_ring_supports_scratch_reg - check if the ring supports
 * writing to scratch registers
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if a specific ring supports writing to scratch registers (all asics).
 * Returns true if the ring supports writing to scratch regs, false if not.
 */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
				      struct radeon_ring *ring)
{
	switch (ring->idx) {
	case RADEON_RING_TYPE_GFX_INDEX:
	case CAYMAN_RING_TYPE_CP1_INDEX:
	case CAYMAN_RING_TYPE_CP2_INDEX:
		return true;
	default:
		return false;
	}
}

/**
 * radeon_ring_free_size - update the free size
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}
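
/*
 * Editor's note: a worked example of the power-of-two arithmetic above.
 * For a 16KiB ring, ring_size / 4 == 0x1000 dwords and ptr_mask == 0xfff.
 * With rptr == 0x100 and wptr == 0xf00:
 *   (0x100 + 0x1000 - 0xf00) & 0xfff == 0x200 free dwords.
 * When rptr == wptr the expression yields 0, which the final check
 * corrects to "completely free" rather than "completely full".
 */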

/**
 * radeon_ring_alloc - allocate space on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
//        r = radeon_fence_wait_next(rdev);
//       if (r) {
//           mutex_unlock(&rdev->cp.mutex);
//           return r;
//       }
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}

/**
 * radeon_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* We pad to match fetch size */
	while (ring->wptr & ring->align_mask) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

/**
 * radeon_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_commit() then unlock the ring (all asics).
 */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&rdev->ring_lock);
}
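
/*
 * Example (editor's sketch, not part of the original file): emitting a
 * few NOPs by hand with the lock/write/commit pattern above; this is
 * essentially what radeon_ring_force_activity() below does.
 */
#if 0
static int example_emit_nops(struct radeon_device *rdev,
			     struct radeon_ring *ring, unsigned count)
{
	unsigned i;
	int r;

	r = radeon_ring_lock(rdev, ring, count);	/* takes ring_lock */
	if (r)
		return r;
	for (i = 0; i < count; i++)
		radeon_ring_write(ring, ring->nop);
	radeon_ring_unlock_commit(rdev, ring);		/* bumps wptr, drops lock */
	return 0;
}
#endif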

/**
 * radeon_ring_undo - reset the wptr
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * radeon_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_undo() then unlock the ring (all asics).
 */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_force_activity - add some nop packets to the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Add some nop packets to the ring to force activity (all asics).
 * Used for lockup detection to see if the rptr is advancing.
 */
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}

/**
 * radeon_ring_lockup_update - update lockup tracking variables
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = GetTimerTicks();
}

/**
 * radeon_ring_test_lockup() - check if the ring is locked up
 * @rdev:       radeon device structure
 * @ring:       radeon_ring structure holding ring information
 *
 * We don't need to initialize the lockup tracking information here; either
 * the CP rptr will have moved or the tick counter will have wrapped, and
 * both cases force a (re)initialization of the tracking information.
 *
 * A possible false positive: we are called again after a long while and
 * last_rptr still equals the current CP rptr.  Unlikely, but it can happen.
 * To avoid it, if the time elapsed since the last call exceeds the lockup
 * timeout we return false and update the tracking information.  Because of
 * this, the caller must call radeon_ring_test_lockup several times within
 * the timeout window for a lockup to be reported; the fencing code should
 * be cautious about that.
 *
 * The caller should also write to the ring to force the CP to do something,
 * so we don't get a false positive when the CP simply has nothing to do.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = GetTimerTicks();
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}
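
/*
 * Editor's note: the two halves of lockup detection are meant to be used
 * together, e.g. from an asic gpu_is_lockup() callback: first
 * radeon_ring_force_activity() to guarantee the rptr must advance, then
 * radeon_ring_test_lockup() to see whether it actually did.
 */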

/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 * @data: place where the saved command dwords are stored
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	/* just in case lock the ring */
	mutex_lock(&rdev->ring_lock);
	*data = NULL;

	if (ring->ring_obj == NULL) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* calculate the number of dw on the ring */
	if (ring->rptr_save_reg)
		ptr = RREG32(ring->rptr_save_reg);
	else if (rdev->wb.enabled)
		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
	else {
		/* no way to read back the next rptr */
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	mutex_unlock(&rdev->ring_lock);
	return size;
}

/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = radeon_ring_lock(rdev, ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		radeon_ring_write(ring, data[i]);
	}

	radeon_ring_unlock_commit(rdev, ring);
	kfree(data);
	return 0;
}
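
/*
 * Example (editor's sketch, not part of the original file): how a GPU
 * reset path would pair the two helpers above.  radeon_ring_backup()
 * hands ownership of *data to the caller; radeon_ring_restore() replays
 * the dwords and kfree()s the buffer.
 */
#if 0
static int example_reset_and_replay(struct radeon_device *rdev,
				    struct radeon_ring *ring)
{
	uint32_t *data;
	unsigned size;

	size = radeon_ring_backup(rdev, ring, &data);

	/* ... reset the GPU and reinitialize the ring here ... */

	return radeon_ring_restore(rdev, ring, size, data);
}
#endif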

/**
 * radeon_ring_init - init driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @rptr_reg: MMIO offset of the rptr register
 * @wptr_reg: MMIO offset of the wptr register
 * @ptr_reg_shift: bit offset of the rptr/wptr values
 * @ptr_reg_mask: bit mask of the rptr/wptr values
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (rdev->wb.enabled) {
		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
	}
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
	radeon_ring_lockup_update(ring);
	return 0;
}

/**
 * radeon_ring_fini - tear down the driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	if (ring->rptr_save_reg) {
		seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
			   RREG32(ring->rptr_save_reg));
	}
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		unsigned r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
	return 0;
#endif
}