/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

/*
 * IB.
 */

#if 0

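/*
 * An indirect buffer (IB) is a 64KB slice of a single GTT buffer object
 * into which a command stream is copied and then fetched by the CP via
 * the main ring.  The pool holds RADEON_IB_POOL_SIZE such slots, each
 * guarded by a fence so a slot is only reused once the GPU has consumed
 * it; ib_pool.mutex protects the slot bookkeeping.
 *
 * The helpers below are compiled out (#if 0) in this port, but document
 * the intended lifecycle.  A typical flow, as a sketch rather than a
 * verbatim caller:
 *
 *	struct radeon_ib *ib;
 *	radeon_ib_get(rdev, &ib);	// grab a free slot + fresh fence
 *	// ... write packets into ib->ptr[] and set ib->length_dw ...
 *	radeon_ib_schedule(rdev, ib);	// put it on the ring, emit fence
 *	radeon_ib_free(rdev, &ib);	// return the slot to the pool
 */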
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen, it means we allocated all
		 * IB and haven't scheduled one yet, return EBUSY to
		 * userspace hoping that on ioctl recall we get better
		 * luck
		 */
		dev_err(rdev->dev, "no free indirect buffer !\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emited)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = 0;

	if (!ib->length_dw || !rdev->cp.ready) {
		/* TODO: Nothing in the ib to report. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled IB is considered free and protected by the fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev);
	return 0;
}
#endif

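/*
 * The pool is backed by one pinned GTT buffer object of
 * RADEON_IB_POOL_SIZE * 64KB; each IB slot gets a 64KB slice of it, with
 * the matching GPU address and kernel mapping recorded so callers can
 * fill ib->ptr[] directly.
 */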
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	/* Allocate 1M object buffer */
	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
				true, RADEON_GEM_DOMAIN_GTT,
				&rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.robj) {
		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ib_pool.robj);
			radeon_bo_unpin(rdev->ib_pool.robj);
			radeon_bo_unreserve(rdev->ib_pool.robj);
		}
		radeon_bo_unref(&rdev->ib_pool.robj);
		rdev->ib_pool.robj = NULL;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}


/*
 * Ring.
 */
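/*
 * The command processor (CP) ring is a power-of-two sized buffer of
 * dwords indexed by a write pointer (wptr, advanced by the driver) and a
 * read pointer (rptr, advanced by the GPU); free space is computed with
 * ptr_mask arithmetic in radeon_ring_free_size() below.
 *
 * Callers bracket their writes roughly like this (a sketch of the
 * locking protocol implemented below, not a verbatim caller):
 *
 *	radeon_ring_lock(rdev, ndw);	// reserve ndw dwords, takes cp.mutex
 *	radeon_ring_write(rdev, ...);	// emit packets
 *	radeon_ring_unlock_commit(rdev);	// pad, barrier, commit wptr
 *	// or radeon_ring_unlock_undo(rdev) to roll wptr back on error
 */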
void radeon_ring_free_size(struct radeon_device *rdev)
{
	if (rdev->family >= CHIP_R600)
		rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	else
		rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	/* This works because ring_size is a power of 2 */
	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
	rdev->cp.ring_free_dw -= rdev->cp.wptr;
	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
	if (!rdev->cp.ring_free_dw) {
		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	}
}

int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	mutex_lock(&rdev->cp.mutex);
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw) {
			break;
		}
//        r = radeon_fence_wait_next(rdev);
//       if (r) {
//           mutex_unlock(&rdev->cp.mutex);
//           return r;
//       }
	}
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}

void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(rdev, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	radeon_cp_commit(rdev);
	mutex_unlock(&rdev->cp.mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}

int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	int r;

	ENTER();

	rdev->cp.ring_size = ring_size;
	/* Allocate ring buffer */
	if (rdev->cp.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
					 RADEON_GEM_DOMAIN_GTT,
					 &rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
				      &rdev->cp.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->cp.ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->cp.ring_obj,
				       (void **)&rdev->cp.ring);
		radeon_bo_unreserve(rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;

	LEAVE();

	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev)
{
	int r;

	mutex_lock(&rdev->cp.mutex);
	if (rdev->cp.ring_obj) {
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->cp.ring_obj);
			radeon_bo_unpin(rdev->cp.ring_obj);
			radeon_bo_unreserve(rdev->cp.ring_obj);
		}
		radeon_bo_unref(&rdev->cp.ring_obj);
		rdev->cp.ring = NULL;
		rdev->cp.ring_obj = NULL;
	}
	mutex_unlock(&rdev->cp.mutex);
}


/*
 * Debugfs info
 */
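/*
 * When CONFIG_DEBUG_FS is set, one debugfs entry is registered per IB
 * slot (named "radeon_ib_%04u"); reading it dumps the slot's index,
 * fence pointer, size in dwords and the raw dwords last written to it.
 */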
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}