/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include 
//#include "drmP.h"
//#include "drm.h"
#include "radeon_drm.h"
#include "radeon_microcode.h"

#include "radeon_reg.h"
#include "radeon.h"

/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 *
 * Some of these functions might be used by newer ASICs.
 */
void r100_hdp_reset(struct radeon_device *rdev);
void r100_gpu_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_mc_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);

#if 0
/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here ? */
	/* It seems the hw only caches one entry, so we should discard this
	 * entry, otherwise the first GPU GART read that hits it could end up
	 * at the wrong address. */
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->gart.table.ram.ptr == NULL) {
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
		r = radeon_gart_table_ram_alloc(rdev);
		if (r) {
			return r;
		}
	}
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32(RADEON_AIC_HI_ADDR, tmp);
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}

int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	/* valid indices are 0 .. num_gpu_pages - 1 */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
	return 0;
}

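/*
 * Editorial sketch (not part of the original driver): the PCI GART above is a
 * flat, single-level table of 4-byte little-endian entries, one per GPU page,
 * covering the window programmed into AIC_LO_ADDR/AIC_HI_ADDR.  Assuming the
 * GPU page size equals PAGE_SIZE, the entry that translates a GPU address
 * inside the GTT window would be found roughly like this (hypothetical helper,
 * shown only to illustrate the table layout):
 */
static inline int r100_pci_gart_index_example(struct radeon_device *rdev,
					      uint64_t gpu_addr)
{
	return (int)((gpu_addr - rdev->mc.gtt_location) / PAGE_SIZE);
}
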
int r100_gart_enable(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_AGP) {
		r100_pci_gart_disable(rdev);
		return 0;
	}
	return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
void r100_mc_disable_clients(struct radeon_device *rdev)
{
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;

	/* FIXME: is this function correct for rs100,rs200,rs300 ? */
//   if (r100_gui_wait_for_idle(rdev)) {
//       printk(KERN_WARNING "Failed to wait GUI idle while "
//              "programming pipes. Bad things might happen.\n");
//   }

	/* stop display and memory access */
	ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
	WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);

	r100_gpu_wait_for_vsync(rdev);

	WREG32(RADEON_CRTC_GEN_CNTL,
	       (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
	       RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);

	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);

		r100_gpu_wait_for_vsync2(rdev);
		WREG32(RADEON_CRTC2_GEN_CNTL,
		       (crtc2_gen_cntl &
		        ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
		       RADEON_CRTC2_DISP_REQ_EN_B);
	}

	udelay(500);
}

void r100_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(RADEON_MC_FB_LOCATION, tmp);

	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);

	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(RADEON_AGP_BASE, 0);
	}

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}

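/*
 * Worked example (editorial note): MC_FB_LOCATION packs the framebuffer window
 * in 64 KiB units, so with vram_location = 0 and vram_size = 64 MiB the code
 * above computes (0x04000000 - 1) >> 16 = 0x03FF for RADEON_MC_FB_TOP and 0 for
 * RADEON_MC_FB_START; the AGP window is packed the same way into
 * MC_AGP_LOCATION when AGP is enabled.
 */
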
int r100_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	r100_gpu_init(rdev);
	/* Disable GART, which also disables out-of-GART access */
	r100_pci_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	r100_mc_disable_clients(rdev);
	if (r100_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	r100_mc_setup(rdev);
	return 0;
}

void r100_mc_fini(struct radeon_device *rdev)
{
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}


/*
 * Fence emission
 */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}


/*
 * Writeback
 */
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, 4096,
					 true,
					 RADEON_GEM_DOMAIN_GTT,
					 false, &rdev->wb.wb_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj,
				      RADEON_GEM_DOMAIN_GTT,
				      &rdev->wb.gpu_addr);
		if (r) {
			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
			return r;
		}
	}
	WREG32(0x774, rdev->wb.gpu_addr);
	WREG32(0x70C, rdev->wb.gpu_addr + 1024);
	WREG32(0x770, 0xff);
	return 0;
}

void r100_wb_fini(struct radeon_device *rdev)
{
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is specified in units of 64 bytes */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_pages, 8191);

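	/*
	 * Worked example (editorial note, assuming PAGE_SIZE == 4096): with a
	 * 4096-byte stride, pitch = 4096 / 64 = 64 and stride_pixels =
	 * 4096 / 4 = 1024.  Each BITBLT_MULTI packet below moves at most 8191
	 * pages, so the ring request that follows reserves 10 dwords per loop
	 * plus a fixed 64-dword margin for the cache flush and fence.
	 */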
	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;

		/* pages are in the Y direction (height),
		   page width is in the X direction (width) */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}


/*
 * CP
 */
void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 2);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}

#endif

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	int i;

    dbgprintf("%s\n\r",__FUNCTION__);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	WREG32(RADEON_CP_ME_RAM_ADDR, 0);
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
		}
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
		}
	}
}


int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

    dbgprintf("%s\n\r",__FUNCTION__);

//   if (r100_debugfs_cp_init(rdev)) {
//       DRM_ERROR("Failed to register debugfs file for CP !\n");
//   }
	/* Reset CP */
	tmp = RREG32(RADEON_CP_CSQ_STAT);
	if ((tmp & (1 << 31))) {
		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		tmp = RREG32(RADEON_CP_CSQ_STAT);
		if ((tmp & (1 << 31))) {
			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
		}
	} else {
		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
	}
	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
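	/*
	 * Worked example (editorial note, assuming drm_order() returns the
	 * smallest order with (1 << order) >= its argument): for a requested
	 * ring_size of 1 MiB, ring_size / 8 = 131072, rb_bufsz = 17 and the
	 * aligned ring_size becomes (1 << 18) * 4 = 1 MiB again.
	 */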
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Writes to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force a CP_RB_WPTR write if it is written more than once before the
	 * delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords))
	 *    indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords))
	 *    indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords))
	 * The idea being that most of the gpu cmds will go through the
	 * indirect1 buffer, so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	WREG32(RADEON_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       RADEON_BUF_SWAP_32BIT |
#endif
	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 */
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x718, 0);
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}

#if 0

void r100_cp_fini(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

#endif

int r100_cp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

    dbgprintf("%s\n\r",__FUNCTION__);

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 16))) {
			DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

#if 0
/*
 * CS functions
 */
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

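/*
 * Worked example (editorial note): the auth bitmap above is indexed per
 * 128-byte register block, one 32-bit word per block and one bit per register
 * dword.  For reg = 0x1720: j = 0x1720 >> 7 = 46 and m = 1 << ((0x1720 >> 2) & 31)
 * = 1 << 8, i.e. bit 8 of auth[46] decides whether the write is allowed.
 */
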
void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assumes that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header = ib_chunk->kdata[idx];

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

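/*
 * Reference note (editorial, based on the usual R100-era CP packet encoding;
 * the authoritative definitions are the CP_PACKET* macros used above):
 * bits [31:30] of the header give the packet type and bits [29:16] the dword
 * count minus one; a type-0 header carries the register dword offset in
 * bits [12:0] and the one-register-write flag in bit 15, while a type-3
 * header carries the opcode in bits [15:8].
 */
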
/**
 * r100_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	volatile uint32_t *ib;
	uint32_t tmp;
	unsigned reg;
	unsigned i;
	unsigned idx;
	bool onereg;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	reg = pkt->reg;
	onereg = false;
	if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
		onereg = true;
	}
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		switch (reg) {
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
		case RADEON_DST_PITCH_OFFSET:
		case RADEON_SRC_PITCH_OFFSET:
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			tmp = ib_chunk->kdata[idx] & 0x003fffff;
			tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
			ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
			break;
		case RADEON_RB3D_DEPTHOFFSET:
		case RADEON_RB3D_COLOROFFSET:
		case R300_RB3D_COLOROFFSET0:
		case R300_ZB_DEPTHOFFSET:
		case R200_PP_TXOFFSET_0:
		case R200_PP_TXOFFSET_1:
		case R200_PP_TXOFFSET_2:
		case R200_PP_TXOFFSET_3:
		case R200_PP_TXOFFSET_4:
		case R200_PP_TXOFFSET_5:
		case RADEON_PP_TXOFFSET_0:
		case RADEON_PP_TXOFFSET_1:
		case RADEON_PP_TXOFFSET_2:
		case R300_TX_OFFSET_0:
		case R300_TX_OFFSET_0+4:
		case R300_TX_OFFSET_0+8:
		case R300_TX_OFFSET_0+12:
		case R300_TX_OFFSET_0+16:
		case R300_TX_OFFSET_0+20:
		case R300_TX_OFFSET_0+24:
		case R300_TX_OFFSET_0+28:
		case R300_TX_OFFSET_0+32:
		case R300_TX_OFFSET_0+36:
		case R300_TX_OFFSET_0+40:
		case R300_TX_OFFSET_0+44:
		case R300_TX_OFFSET_0+48:
		case R300_TX_OFFSET_0+52:
		case R300_TX_OFFSET_0+56:
		case R300_TX_OFFSET_0+60:
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
			break;
		default:
			/* FIXME: we don't want to allow any other packets */
			break;
		}
		if (onereg) {
			/* FIXME: forbid onereg write to register on relocate */
			break;
		}
	}
	return 0;
}

int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_object *robj)
{
	struct radeon_cs_chunk *ib_chunk;
	unsigned idx;

	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  ib_chunk->kdata[idx+2] + 1,
			  radeon_object_size(robj));
		return -EINVAL;
	}
	return 0;
}

static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx;
	unsigned i, c;
	volatile uint32_t *ib;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		c = ib_chunk->kdata[idx++];
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
		}
		if (c & 1) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	case 0x23:
		/* FIXME: cleanup */
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case PACKET3_3D_DRAW_IMMD:
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
			case PACKET_TYPE0:
				r = r100_packet0_check(p, &pkt);
				break;
			case PACKET_TYPE2:
				break;
			case PACKET_TYPE3:
				r = r100_packet3_check(p, &pkt);
				break;
			default:
				DRM_ERROR("Unknown packet type %d !\n",
					  pkt.type);
				return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}


/*
 * Global GPU functions
 */
void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}

#endif


/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
{
	uint32_t crtc_gen_cntl, tmp;
	int i;

	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
		return;
	}
	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC_STATUS);
		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

/* Wait for vertical sync on secondary CRTC */
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
{
	uint32_t crtc2_gen_cntl, tmp;
	int i;

	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
		return;

	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC2_STATUS);
		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
		if (tmp >= n) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_gui_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
		       " Bad things might happen.\n");
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 31))) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		if (tmp & (1 << 2)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r100_gpu_init(struct radeon_device *rdev)
{
	/* TODO: anything to do here? pipes? */
	r100_hdp_reset(rdev);
}


void r100_hdp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;

    dbgprintf("%s\n\r",__FUNCTION__);

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}


int r100_rb2d_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

    dbgprintf("%s\n\r",__FUNCTION__);

	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 26))) {
			DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
				 tmp);
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

#if 0

int r100_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* TODO: reset 3D engine */
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * VRAM info
 */
static void r100_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_is_ddr = false;
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.vram_is_ddr = true;
	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
		rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RV100_HALF_MODE) {
			rdev->mc.vram_width = 32;
		} else {
			rdev->mc.vram_width = 64;
		}
		if (rdev->flags & RADEON_SINGLE_CRTC) {
			rdev->mc.vram_width /= 4;
			rdev->mc.vram_is_ddr = true;
		}
	} else if (rdev->family <= CHIP_RV280) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
			rdev->mc.vram_width = 128;
		} else {
			rdev->mc.vram_width = 64;
		}
	} else {
		/* newer IGPs */
		rdev->mc.vram_width = 128;
	}
}

void r100_vram_info(struct radeon_device *rdev)
{
	r100_vram_get_type(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		uint32_t tom;
		/* read NB_TOM to get the amount of ram stolen for the GPU */
		tom = RREG32(RADEON_NB_TOM);
		rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	} else {
		rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
		/* Some production boards of m6 will report 0
		 * if it's 8 MB
		 */
		if (rdev->mc.vram_size == 0) {
			rdev->mc.vram_size = 8192 * 1024;
			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
		}
	}

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}


/*
 * Indirect registers accessor
 */
void r100_pll_errata_after_index(struct radeon_device *rdev)
{
	if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
		return;
	}
	(void)RREG32(RADEON_CLOCK_CNTL_DATA);
	(void)RREG32(RADEON_CRTC_GEN_CNTL);
}

static void r100_pll_errata_after_data(struct radeon_device *rdev)
{
	/* This workaround is necessary on RV100, RS100 and RS200 chips,
	 * or the chip could hang on a subsequent access
	 */
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
		udelay(5000);
	}

	/* This function is required to work around a hardware bug in some (all?)
	 * revisions of the R300.  This workaround should be called after every
	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
	 * may not be correct.
	 */
	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
		uint32_t save, tmp;

		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
	}
}

uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t data;

	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
	r100_pll_errata_after_index(rdev);
	data = RREG32(RADEON_CLOCK_CNTL_DATA);
	r100_pll_errata_after_data(rdev);
	return data;
}

void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
	r100_pll_errata_after_index(rdev);
	WREG32(RADEON_CLOCK_CNTL_DATA, v);
	r100_pll_errata_after_data(rdev);
}

#endif

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
	if (reg < 0x10000)
		return readl(((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}


void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	if (reg < 0x10000)
		writel(v, ((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}

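/*
 * Editorial note: the two accessors above implement the split between the
 * directly mapped register aperture (offsets below 0x10000, plain
 * readl()/writel()) and the indirect path, where the offset is first written to
 * RADEON_MM_INDEX and the data then moves through RADEON_MM_DATA.  For example,
 * reading register 0x10004 via r100_mm_rreg() issues
 * writel(0x10004, rmmio + RADEON_MM_INDEX) followed by
 * readl(rmmio + RADEON_MM_DATA).
 */
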
int r100_init(struct radeon_device *rdev)
{
	return 0;
}