/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include 
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_microcode.h"
#include "radeon_reg.h"
#include "radeon.h"
 
/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 *
 * Some of these functions might be used by newer ASICs.
 */
void r100_hdp_reset(struct radeon_device *rdev);
void r100_gpu_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_mc_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
 
49
#if 0
50
/*
51
 * PCI GART
52
 */
53
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
54
{
55
	/* TODO: can we do something here? */
	/* It seems the hw only caches one entry, so we should discard this
	 * entry, otherwise the first GPU GART read hitting it could end up
	 * at the wrong address. */
59
}
60
 
61
int r100_pci_gart_enable(struct radeon_device *rdev)
62
{
63
	uint32_t tmp;
64
	int r;
65
 
66
	/* Initialize common gart structure */
67
	r = radeon_gart_init(rdev);
68
	if (r) {
69
		return r;
70
	}
71
	if (rdev->gart.table.ram.ptr == NULL) {
72
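		/* each GART entry is a 32-bit little-endian address, so the
		 * table needs 4 bytes per GPU page (see r100_pci_gart_set_page) */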
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
73
		r = radeon_gart_table_ram_alloc(rdev);
74
		if (r) {
75
			return r;
76
		}
77
	}
78
	/* discard memory requests outside of the configured range */
79
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
80
	WREG32(RADEON_AIC_CNTL, tmp);
81
	/* set address range for PCI address translate */
82
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
83
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
84
	WREG32(RADEON_AIC_HI_ADDR, tmp);
85
	/* Enable bus mastering */
86
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
87
	WREG32(RADEON_BUS_CNTL, tmp);
88
	/* set PCI GART page-table base address */
89
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
90
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
91
	WREG32(RADEON_AIC_CNTL, tmp);
92
	r100_pci_gart_tlb_flush(rdev);
93
	rdev->gart.ready = true;
94
	return 0;
95
}
96
 
97
void r100_pci_gart_disable(struct radeon_device *rdev)
98
{
99
	uint32_t tmp;
100
 
101
	/* discard memory requests outside of the configured range */
102
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
103
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
104
	WREG32(RADEON_AIC_LO_ADDR, 0);
105
	WREG32(RADEON_AIC_HI_ADDR, 0);
106
}
107
 
108
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
109
{
110
	if (i < 0 || i > rdev->gart.num_gpu_pages) {
111
		return -EINVAL;
112
	}
113
	rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
114
	return 0;
115
}
116
 
117
int r100_gart_enable(struct radeon_device *rdev)
118
{
119
	if (rdev->flags & RADEON_IS_AGP) {
120
		r100_pci_gart_disable(rdev);
121
		return 0;
122
	}
123
	return r100_pci_gart_enable(rdev);
124
}
125
 
126
 
127
/*
128
 * MC
129
 */
130
void r100_mc_disable_clients(struct radeon_device *rdev)
131
{
132
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
133
 
134
	/* FIXME: is this function correct for rs100,rs200,rs300 ? */
135
//   if (r100_gui_wait_for_idle(rdev)) {
136
//       printk(KERN_WARNING "Failed to wait GUI idle while "
137
//              "programming pipes. Bad things might happen.\n");
138
//   }
139
 
140
	/* stop display and memory access */
141
	ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
142
	WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
143
	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
144
	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
145
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
146
 
147
	r100_gpu_wait_for_vsync(rdev);
148
 
149
	WREG32(RADEON_CRTC_GEN_CNTL,
150
	       (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
151
	       RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
152
 
153
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
154
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
155
 
156
		r100_gpu_wait_for_vsync2(rdev);
157
		WREG32(RADEON_CRTC2_GEN_CNTL,
158
		       (crtc2_gen_cntl &
159
		        ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
160
		       RADEON_CRTC2_DISP_REQ_EN_B);
161
	}
162
 
163
	udelay(500);
164
}
165
 
166
void r100_mc_setup(struct radeon_device *rdev)
167
{
168
	uint32_t tmp;
169
	int r;
170
 
171
	r = r100_debugfs_mc_info_init(rdev);
172
	if (r) {
173
		DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
174
	}
175
	/* Write VRAM size in case we are limiting it */
176
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
177
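	/* MC_FB_LOCATION packs the VRAM range in 64 KiB units (hence the
	 * >> 16): one 16-bit half holds the start, the other the top */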
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
178
	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
179
	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
180
	WREG32(RADEON_MC_FB_LOCATION, tmp);
181
 
182
	/* Enable bus mastering */
183
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
184
	WREG32(RADEON_BUS_CNTL, tmp);
185
 
186
	if (rdev->flags & RADEON_IS_AGP) {
187
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
188
		tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
189
		tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
190
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
191
		WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
192
	} else {
193
		WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
194
		WREG32(RADEON_AGP_BASE, 0);
195
	}
196
 
197
	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
198
	tmp |= (7 << 28);
199
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
200
	(void)RREG32(RADEON_HOST_PATH_CNTL);
201
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
202
	(void)RREG32(RADEON_HOST_PATH_CNTL);
203
}
204
 
205
int r100_mc_init(struct radeon_device *rdev)
206
{
207
	int r;
208
 
209
	if (r100_debugfs_rbbm_init(rdev)) {
210
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
211
	}
212
 
213
	r100_gpu_init(rdev);
214
	/* Disable GART, which also disables out-of-GART accesses */
215
	r100_pci_gart_disable(rdev);
216
 
217
	/* Setup GPU memory space */
218
	rdev->mc.vram_location = 0xFFFFFFFFUL;
219
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
220
	if (rdev->flags & RADEON_IS_AGP) {
221
		r = radeon_agp_init(rdev);
222
		if (r) {
223
			printk(KERN_WARNING "[drm] Disabling AGP\n");
224
			rdev->flags &= ~RADEON_IS_AGP;
225
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
226
		} else {
227
			rdev->mc.gtt_location = rdev->mc.agp_base;
228
		}
229
	}
230
	r = radeon_mc_setup(rdev);
231
	if (r) {
232
		return r;
233
	}
234
 
235
	r100_mc_disable_clients(rdev);
236
	if (r100_mc_wait_for_idle(rdev)) {
237
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
239
	}
240
 
241
	r100_mc_setup(rdev);
242
	return 0;
243
}
244
 
245
void r100_mc_fini(struct radeon_device *rdev)
246
{
247
	r100_pci_gart_disable(rdev);
248
	radeon_gart_table_ram_free(rdev);
249
	radeon_gart_fini(rdev);
250
}
251
 
252
 
253
/*
254
 * Fence emission
255
 */
256
void r100_fence_ring_emit(struct radeon_device *rdev,
257
			  struct radeon_fence *fence)
258
{
259
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are the ib scheduler and buffer moves) */
261
	/* Wait until IDLE & CLEAN */
262
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
263
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
264
	/* Emit fence sequence & fire IRQ */
265
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
266
	radeon_ring_write(rdev, fence->seq);
267
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
268
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
269
}
270
 
1120 serge 271
#endif
1117 serge 272
 
273
/*
274
 * Writeback
275
 */
276
int r100_wb_init(struct radeon_device *rdev)
277
{
278
	int r;
279
 
280
	if (rdev->wb.wb_obj == NULL) {
281
		r = radeon_object_create(rdev, NULL, 4096,
282
					 true,
283
					 RADEON_GEM_DOMAIN_GTT,
284
					 false, &rdev->wb.wb_obj);
285
		if (r) {
286
			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
287
			return r;
288
		}
289
		r = radeon_object_pin(rdev->wb.wb_obj,
290
				      RADEON_GEM_DOMAIN_GTT,
291
				      &rdev->wb.gpu_addr);
292
		if (r) {
293
			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
294
			return r;
295
		}
296
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
297
		if (r) {
298
			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
299
			return r;
300
		}
301
	}
302
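	/* Magic offsets: these appear to be the scratch/writeback registers
	 * (likely SCRATCH_ADDR = 0x774, CP_RB_RPTR_ADDR = 0x70C and
	 * SCRATCH_UMSK = 0x770); the names are an assumption, not taken from
	 * radeon_reg.h here */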
	WREG32(0x774, rdev->wb.gpu_addr);
303
	WREG32(0x70C, rdev->wb.gpu_addr + 1024);
304
	WREG32(0x770, 0xff);
305
	return 0;
306
}
307
 
308
void r100_wb_fini(struct radeon_device *rdev)
309
{
310
	if (rdev->wb.wb_obj) {
1120 serge 311
//       radeon_object_kunmap(rdev->wb.wb_obj);
312
//       radeon_object_unpin(rdev->wb.wb_obj);
313
//       radeon_object_unref(&rdev->wb.wb_obj);
1117 serge 314
		rdev->wb.wb = NULL;
315
		rdev->wb.wb_obj = NULL;
316
	}
317
}
318
 
1120 serge 319
 
320
#if 0
1117 serge 321
int r100_copy_blit(struct radeon_device *rdev,
322
		   uint64_t src_offset,
323
		   uint64_t dst_offset,
324
		   unsigned num_pages,
325
		   struct radeon_fence *fence)
326
{
327
	uint32_t cur_pages;
328
	uint32_t stride_bytes = PAGE_SIZE;
329
	uint32_t pitch;
330
	uint32_t stride_pixels;
331
	unsigned ndw;
332
	int num_loops;
333
	int r = 0;
334
 
335
	/* radeon limited to 16k stride */
336
	stride_bytes &= 0x3fff;
337
	/* radeon pitch is /64 */
338
	pitch = stride_bytes / 64;
339
	stride_pixels = stride_bytes / 4;
340
	num_loops = DIV_ROUND_UP(num_pages, 8191);
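	/* the blit is split into loops of at most 8191 pages each, presumably
	 * because the BITBLT_MULTI height field is 13 bits wide; the pitch is
	 * programmed in 64-byte units (stride_bytes / 64 above) */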
341
 
342
	/* Ask for enough room for blit + flush + fence */
343
	ndw = 64 + (10 * num_loops);
344
	r = radeon_ring_lock(rdev, ndw);
345
	if (r) {
346
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
347
		return -EINVAL;
348
	}
349
	while (num_pages > 0) {
350
		cur_pages = num_pages;
351
		if (cur_pages > 8191) {
352
			cur_pages = 8191;
353
		}
354
		num_pages -= cur_pages;
355
 
356
		/* pages are in the Y direction (height);
		   page width is in the X direction (width) */
358
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
359
		radeon_ring_write(rdev,
360
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
361
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
362
				  RADEON_GMC_SRC_CLIPPING |
363
				  RADEON_GMC_DST_CLIPPING |
364
				  RADEON_GMC_BRUSH_NONE |
365
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
366
				  RADEON_GMC_SRC_DATATYPE_COLOR |
367
				  RADEON_ROP3_S |
368
				  RADEON_DP_SRC_SOURCE_MEMORY |
369
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
370
				  RADEON_GMC_WR_MSK_DIS);
371
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
372
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
373
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
374
		radeon_ring_write(rdev, 0);
375
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
376
		radeon_ring_write(rdev, num_pages);
377
		radeon_ring_write(rdev, num_pages);
378
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
379
	}
380
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
381
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
382
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
383
	radeon_ring_write(rdev,
384
			  RADEON_WAIT_2D_IDLECLEAN |
385
			  RADEON_WAIT_HOST_IDLECLEAN |
386
			  RADEON_WAIT_DMA_GUI_IDLE);
387
	if (fence) {
388
		r = radeon_fence_emit(rdev, fence);
389
	}
390
	radeon_ring_unlock_commit(rdev);
391
	return r;
392
}
393
 
394
 
395
/*
396
 * CP
397
 */
398
void r100_ring_start(struct radeon_device *rdev)
399
{
400
	int r;
401
 
402
	r = radeon_ring_lock(rdev, 2);
403
	if (r) {
404
		return;
405
	}
406
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
407
	radeon_ring_write(rdev,
408
			  RADEON_ISYNC_ANY2D_IDLE3D |
409
			  RADEON_ISYNC_ANY3D_IDLE2D |
410
			  RADEON_ISYNC_WAIT_IDLEGUI |
411
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
412
	radeon_ring_unlock_commit(rdev);
413
}
414
 
415
#endif
416
 
417
static void r100_cp_load_microcode(struct radeon_device *rdev)
418
{
419
	int i;
420
 
1120 serge 421
    dbgprintf("%s\n",__FUNCTION__);
1117 serge 422
 
423
	if (r100_gui_wait_for_idle(rdev)) {
424
		printk(KERN_WARNING "Failed to wait GUI idle while "
425
		       "programming pipes. Bad things might happen.\n");
426
	}
427
 
428
	WREG32(RADEON_CP_ME_RAM_ADDR, 0);
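	/* The CP microcode is 256 64-bit words; after pointing ME_RAM_ADDR at
	 * entry 0, each DATAH/DATAL pair below is streamed in sequence (the
	 * address presumably auto-increments on write) */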
429
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
430
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
431
	    (rdev->family == CHIP_RS200)) {
432
		DRM_INFO("Loading R100 Microcode\n");
433
		for (i = 0; i < 256; i++) {
434
			WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
435
			WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
436
		}
437
	} else if ((rdev->family == CHIP_R200) ||
438
		   (rdev->family == CHIP_RV250) ||
439
		   (rdev->family == CHIP_RV280) ||
440
		   (rdev->family == CHIP_RS300)) {
441
		DRM_INFO("Loading R200 Microcode\n");
442
		for (i = 0; i < 256; i++) {
443
			WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
444
			WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
445
		}
446
	} else if ((rdev->family == CHIP_R300) ||
447
		   (rdev->family == CHIP_R350) ||
448
		   (rdev->family == CHIP_RV350) ||
449
		   (rdev->family == CHIP_RV380) ||
450
		   (rdev->family == CHIP_RS400) ||
451
		   (rdev->family == CHIP_RS480)) {
452
		DRM_INFO("Loading R300 Microcode\n");
453
		for (i = 0; i < 256; i++) {
454
			WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
455
			WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
456
		}
457
	} else if ((rdev->family == CHIP_R420) ||
458
		   (rdev->family == CHIP_R423) ||
459
		   (rdev->family == CHIP_RV410)) {
460
		DRM_INFO("Loading R400 Microcode\n");
461
		for (i = 0; i < 256; i++) {
462
			WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
463
			WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
464
		}
465
	} else if ((rdev->family == CHIP_RS690) ||
466
		   (rdev->family == CHIP_RS740)) {
467
		DRM_INFO("Loading RS690/RS740 Microcode\n");
468
		for (i = 0; i < 256; i++) {
469
			WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
470
			WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
471
		}
472
	} else if (rdev->family == CHIP_RS600) {
473
		DRM_INFO("Loading RS600 Microcode\n");
474
		for (i = 0; i < 256; i++) {
475
			WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
476
			WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
477
		}
478
	} else if ((rdev->family == CHIP_RV515) ||
479
		   (rdev->family == CHIP_R520) ||
480
		   (rdev->family == CHIP_RV530) ||
481
		   (rdev->family == CHIP_R580) ||
482
		   (rdev->family == CHIP_RV560) ||
483
		   (rdev->family == CHIP_RV570)) {
484
		DRM_INFO("Loading R500 Microcode\n");
485
		for (i = 0; i < 256; i++) {
486
			WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
487
			WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
488
		}
489
	}
490
}
491
 
492
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
493
{
494
	unsigned rb_bufsz;
495
	unsigned rb_blksz;
496
	unsigned max_fetch;
497
	unsigned pre_write_timer;
498
	unsigned pre_write_limit;
499
	unsigned indirect2_start;
500
	unsigned indirect1_start;
501
	uint32_t tmp;
502
	int r;
503
 
1120 serge 504
    dbgprintf("%s\n",__FUNCTION__);
1117 serge 505
 
506
//   if (r100_debugfs_cp_init(rdev)) {
507
//       DRM_ERROR("Failed to register debugfs file for CP !\n");
508
//   }
509
	/* Reset CP */
510
	tmp = RREG32(RADEON_CP_CSQ_STAT);
511
	if ((tmp & (1 << 31))) {
512
		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
513
		WREG32(RADEON_CP_CSQ_MODE, 0);
514
		WREG32(RADEON_CP_CSQ_CNTL, 0);
515
		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
516
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
517
		mdelay(2);
518
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
519
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
520
		mdelay(2);
521
		tmp = RREG32(RADEON_CP_CSQ_STAT);
522
		if ((tmp & (1 << 31))) {
523
			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
524
		}
525
	} else {
526
		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
527
	}
528
	/* Align ring size */
529
	rb_bufsz = drm_order(ring_size / 8);
530
	ring_size = (1 << (rb_bufsz + 1)) * 4;
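	/* worked example: a requested ring_size of 1 MiB gives
	 * drm_order(1048576 / 8) = 17, so ring_size becomes
	 * (1 << 18) * 4 = 1 MiB again; non-power-of-two requests are
	 * rounded up (assuming drm_order returns the log2 ceiling) */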
531
	r100_cp_load_microcode(rdev);
532
	r = radeon_ring_init(rdev, ring_size);
533
	if (r) {
534
		return r;
535
	}
536
	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
538
	rb_blksz = 9;
539
	/* cp will read 128bytes at a time (4 dwords) */
540
	max_fetch = 1;
541
	rdev->cp.align_mask = 16 - 1;
542
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
543
	pre_write_timer = 64;
544
	/* Force CP_RB_WPTR write if written more than one time before the
545
	 * delay expire
546
	 */
547
	pre_write_limit = 0;
548
	/* Setup the cp cache like this (cache size is 96 dwords) :
549
	 *	RING		0  to 15
550
	 *	INDIRECT1	16 to 79
551
	 *	INDIRECT2	80 to 95
552
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
553
	 *    indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
554
	 *    indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
555
	 * The idea is that most of the GPU commands will go through the
	 * indirect1 buffer, so it gets the bigger cache.
557
	 */
558
	indirect2_start = 80;
559
	indirect1_start = 16;
560
	/* cp setup */
561
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
562
	WREG32(RADEON_CP_RB_CNTL,
563
#ifdef __BIG_ENDIAN
564
	       RADEON_BUF_SWAP_32BIT |
565
#endif
566
	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
567
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
568
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
569
	       RADEON_RB_NO_UPDATE);
570
	/* Set ring address */
571
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
572
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
573
	/* Force read & write ptr to 0 */
574
	tmp = RREG32(RADEON_CP_RB_CNTL);
575
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
576
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
577
	WREG32(RADEON_CP_RB_WPTR, 0);
578
	WREG32(RADEON_CP_RB_CNTL, tmp);
579
	udelay(10);
580
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
581
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
582
	/* Set cp mode to bus mastering & enable cp*/
583
	WREG32(RADEON_CP_CSQ_MODE,
584
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
585
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
586
	WREG32(0x718, 0);
587
	WREG32(0x744, 0x00004D4D);
588
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
589
	radeon_ring_start(rdev);
590
	r = radeon_ring_test(rdev);
591
	if (r) {
592
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
593
		return r;
594
	}
595
	rdev->cp.ready = true;
596
	return 0;
597
}
598
 
599
#if 0
600
 
601
void r100_cp_fini(struct radeon_device *rdev)
602
{
603
	/* Disable ring */
604
	rdev->cp.ready = false;
605
	WREG32(RADEON_CP_CSQ_CNTL, 0);
606
	radeon_ring_fini(rdev);
607
	DRM_INFO("radeon: cp finalized\n");
608
}
609
 
610
void r100_cp_disable(struct radeon_device *rdev)
611
{
612
	/* Disable ring */
613
	rdev->cp.ready = false;
614
	WREG32(RADEON_CP_CSQ_MODE, 0);
615
	WREG32(RADEON_CP_CSQ_CNTL, 0);
616
	if (r100_gui_wait_for_idle(rdev)) {
617
		printk(KERN_WARNING "Failed to wait GUI idle while "
618
		       "programming pipes. Bad things might happen.\n");
619
	}
620
}
621
 
622
#endif
623
 
624
int r100_cp_reset(struct radeon_device *rdev)
625
{
626
	uint32_t tmp;
627
	bool reinit_cp;
628
	int i;
629
 
1120 serge 630
    dbgprintf("%s\n",__FUNCTION__);
1117 serge 631
 
632
 
633
	reinit_cp = rdev->cp.ready;
634
	rdev->cp.ready = false;
635
	WREG32(RADEON_CP_CSQ_MODE, 0);
636
	WREG32(RADEON_CP_CSQ_CNTL, 0);
637
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
638
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
639
	udelay(200);
640
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
641
	/* Wait to prevent race in RBBM_STATUS */
642
	mdelay(1);
643
	for (i = 0; i < rdev->usec_timeout; i++) {
644
		tmp = RREG32(RADEON_RBBM_STATUS);
645
		if (!(tmp & (1 << 16))) {
646
			DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
647
				 tmp);
648
			if (reinit_cp) {
649
				return r100_cp_init(rdev, rdev->cp.ring_size);
650
			}
651
			return 0;
652
		}
653
		DRM_UDELAY(1);
654
	}
655
	tmp = RREG32(RADEON_RBBM_STATUS);
656
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
657
	return -1;
658
}
659
 
660
#if 0
661
/*
662
 * CS functions
663
 */
664
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
665
			  struct radeon_cs_packet *pkt,
666
			  const unsigned *auth, unsigned n,
667
			  radeon_packet0_check_t check)
668
{
669
	unsigned reg;
670
	unsigned i, j, m;
671
	unsigned idx;
672
	int r;
673
 
674
	idx = pkt->idx + 1;
675
	reg = pkt->reg;
676
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
680
	if (pkt->one_reg_wr) {
681
		if ((reg >> 7) > n) {
682
			return -EINVAL;
683
		}
684
	} else {
685
		if (((reg + (pkt->count << 2)) >> 7) > n) {
686
			return -EINVAL;
687
		}
688
	}
689
	for (i = 0; i <= pkt->count; i++, idx++) {
690
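		/* auth[] is a bitmap with one bit per dword register: reg >> 7
		 * selects the 32-bit word (each word covers 128 bytes of
		 * register space) and (reg >> 2) & 31 selects the bit */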
		j = (reg >> 7);
691
		m = 1 << ((reg >> 2) & 31);
692
		if (auth[j] & m) {
693
			r = check(p, pkt, idx, reg);
694
			if (r) {
695
				return r;
696
			}
697
		}
698
		if (pkt->one_reg_wr) {
699
			if (!(auth[j] & m)) {
700
				break;
701
			}
702
		} else {
703
			reg += 4;
704
		}
705
	}
706
	return 0;
707
}
708
 
709
void r100_cs_dump_packet(struct radeon_cs_parser *p,
710
			 struct radeon_cs_packet *pkt)
711
{
712
	struct radeon_cs_chunk *ib_chunk;
713
	volatile uint32_t *ib;
714
	unsigned i;
715
	unsigned idx;
716
 
717
	ib = p->ib->ptr;
718
	ib_chunk = &p->chunks[p->chunk_ib_idx];
719
	idx = pkt->idx;
720
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
721
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
722
	}
723
}
724
 
725
/**
726
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assumes that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
 **/
733
int r100_cs_packet_parse(struct radeon_cs_parser *p,
734
			 struct radeon_cs_packet *pkt,
735
			 unsigned idx)
736
{
737
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
738
	uint32_t header = ib_chunk->kdata[idx];
739
 
740
	if (idx >= ib_chunk->length_dw) {
741
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
742
			  idx, ib_chunk->length_dw);
743
		return -EINVAL;
744
	}
745
	pkt->idx = idx;
746
	pkt->type = CP_PACKET_GET_TYPE(header);
747
	pkt->count = CP_PACKET_GET_COUNT(header);
748
	switch (pkt->type) {
749
	case PACKET_TYPE0:
750
		pkt->reg = CP_PACKET0_GET_REG(header);
751
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
752
		break;
753
	case PACKET_TYPE3:
754
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
755
		break;
756
	case PACKET_TYPE2:
757
		pkt->count = -1;
758
		break;
759
	default:
760
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
761
		return -EINVAL;
762
	}
763
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
764
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
765
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
766
		return -EINVAL;
767
	}
768
	return 0;
769
}
770
 
771
/**
772
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Checks that the next packet is a relocation packet3, does bo validation and
 * computes the GPU offset using the provided start.
 **/
782
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
783
			      struct radeon_cs_reloc **cs_reloc)
784
{
785
	struct radeon_cs_chunk *ib_chunk;
786
	struct radeon_cs_chunk *relocs_chunk;
787
	struct radeon_cs_packet p3reloc;
788
	unsigned idx;
789
	int r;
790
 
791
	if (p->chunk_relocs_idx == -1) {
792
		DRM_ERROR("No relocation chunk !\n");
793
		return -EINVAL;
794
	}
795
	*cs_reloc = NULL;
796
	ib_chunk = &p->chunks[p->chunk_ib_idx];
797
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
798
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
799
	if (r) {
800
		return r;
801
	}
802
	p->idx += p3reloc.count + 2;
803
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
804
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
805
			  p3reloc.idx);
806
		r100_cs_dump_packet(p, &p3reloc);
807
		return -EINVAL;
808
	}
809
	idx = ib_chunk->kdata[p3reloc.idx + 1];
810
	if (idx >= relocs_chunk->length_dw) {
811
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
812
			  idx, relocs_chunk->length_dw);
813
		r100_cs_dump_packet(p, &p3reloc);
814
		return -EINVAL;
815
	}
816
	/* FIXME: we assume reloc size is 4 dwords */
817
	*cs_reloc = p->relocs_ptr[(idx / 4)];
818
	return 0;
819
}
820
 
821
static int r100_packet0_check(struct radeon_cs_parser *p,
822
			      struct radeon_cs_packet *pkt)
823
{
824
	struct radeon_cs_chunk *ib_chunk;
825
	struct radeon_cs_reloc *reloc;
826
	volatile uint32_t *ib;
827
	uint32_t tmp;
828
	unsigned reg;
829
	unsigned i;
830
	unsigned idx;
831
	bool onereg;
832
	int r;
833
 
834
	ib = p->ib->ptr;
835
	ib_chunk = &p->chunks[p->chunk_ib_idx];
836
	idx = pkt->idx + 1;
837
	reg = pkt->reg;
838
	onereg = false;
839
	if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
840
		onereg = true;
841
	}
842
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
843
		switch (reg) {
844
		/* FIXME: only allow PACKET3 blit? easier to check for out of
845
		 * range access */
846
		case RADEON_DST_PITCH_OFFSET:
847
		case RADEON_SRC_PITCH_OFFSET:
848
			r = r100_cs_packet_next_reloc(p, &reloc);
849
			if (r) {
850
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
851
					  idx, reg);
852
				r100_cs_dump_packet(p, pkt);
853
				return r;
854
			}
855
			tmp = ib_chunk->kdata[idx] & 0x003fffff;
856
			tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
857
			ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
858
			break;
859
		case RADEON_RB3D_DEPTHOFFSET:
860
		case RADEON_RB3D_COLOROFFSET:
861
		case R300_RB3D_COLOROFFSET0:
862
		case R300_ZB_DEPTHOFFSET:
863
		case R200_PP_TXOFFSET_0:
864
		case R200_PP_TXOFFSET_1:
865
		case R200_PP_TXOFFSET_2:
866
		case R200_PP_TXOFFSET_3:
867
		case R200_PP_TXOFFSET_4:
868
		case R200_PP_TXOFFSET_5:
869
		case RADEON_PP_TXOFFSET_0:
870
		case RADEON_PP_TXOFFSET_1:
871
		case RADEON_PP_TXOFFSET_2:
872
		case R300_TX_OFFSET_0:
873
		case R300_TX_OFFSET_0+4:
874
		case R300_TX_OFFSET_0+8:
875
		case R300_TX_OFFSET_0+12:
876
		case R300_TX_OFFSET_0+16:
877
		case R300_TX_OFFSET_0+20:
878
		case R300_TX_OFFSET_0+24:
879
		case R300_TX_OFFSET_0+28:
880
		case R300_TX_OFFSET_0+32:
881
		case R300_TX_OFFSET_0+36:
882
		case R300_TX_OFFSET_0+40:
883
		case R300_TX_OFFSET_0+44:
884
		case R300_TX_OFFSET_0+48:
885
		case R300_TX_OFFSET_0+52:
886
		case R300_TX_OFFSET_0+56:
887
		case R300_TX_OFFSET_0+60:
888
			r = r100_cs_packet_next_reloc(p, &reloc);
889
			if (r) {
890
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
891
					  idx, reg);
892
				r100_cs_dump_packet(p, pkt);
893
				return r;
894
			}
895
			ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
896
			break;
897
		default:
898
			/* FIXME: we don't want to allow any other packets */
899
			break;
900
		}
901
		if (onereg) {
902
			/* FIXME: forbid onereg write to register on relocate */
903
			break;
904
		}
905
	}
906
	return 0;
907
}
908
 
909
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
910
					 struct radeon_cs_packet *pkt,
911
					 struct radeon_object *robj)
912
{
913
	struct radeon_cs_chunk *ib_chunk;
914
	unsigned idx;
915
 
916
	ib_chunk = &p->chunks[p->chunk_ib_idx];
917
	idx = pkt->idx + 1;
918
	if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
919
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
920
			  "(need %u have %lu) !\n",
921
			  ib_chunk->kdata[idx+2] + 1,
922
			  radeon_object_size(robj));
923
		return -EINVAL;
924
	}
925
	return 0;
926
}
927
 
928
static int r100_packet3_check(struct radeon_cs_parser *p,
929
			      struct radeon_cs_packet *pkt)
930
{
931
	struct radeon_cs_chunk *ib_chunk;
932
	struct radeon_cs_reloc *reloc;
933
	unsigned idx;
934
	unsigned i, c;
935
	volatile uint32_t *ib;
936
	int r;
937
 
938
	ib = p->ib->ptr;
939
	ib_chunk = &p->chunks[p->chunk_ib_idx];
940
	idx = pkt->idx + 1;
941
	switch (pkt->opcode) {
942
	case PACKET3_3D_LOAD_VBPNTR:
943
		c = ib_chunk->kdata[idx++];
944
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
945
			r = r100_cs_packet_next_reloc(p, &reloc);
946
			if (r) {
947
				DRM_ERROR("No reloc for packet3 %d\n",
948
					  pkt->opcode);
949
				r100_cs_dump_packet(p, pkt);
950
				return r;
951
			}
952
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
953
			r = r100_cs_packet_next_reloc(p, &reloc);
954
			if (r) {
955
				DRM_ERROR("No reloc for packet3 %d\n",
956
					  pkt->opcode);
957
				r100_cs_dump_packet(p, pkt);
958
				return r;
959
			}
960
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
961
		}
962
		if (c & 1) {
963
			r = r100_cs_packet_next_reloc(p, &reloc);
964
			if (r) {
965
				DRM_ERROR("No reloc for packet3 %d\n",
966
					  pkt->opcode);
967
				r100_cs_dump_packet(p, pkt);
968
				return r;
969
			}
970
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
971
		}
972
		break;
973
	case PACKET3_INDX_BUFFER:
974
		r = r100_cs_packet_next_reloc(p, &reloc);
975
		if (r) {
976
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
977
			r100_cs_dump_packet(p, pkt);
978
			return r;
979
		}
980
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
981
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
982
		if (r) {
983
			return r;
984
		}
985
		break;
986
	case 0x23:
987
		/* FIXME: cleanup */
988
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
989
		r = r100_cs_packet_next_reloc(p, &reloc);
990
		if (r) {
991
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
992
			r100_cs_dump_packet(p, pkt);
993
			return r;
994
		}
995
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
996
		break;
997
	case PACKET3_3D_DRAW_IMMD:
998
		/* triggers drawing using in-packet vertex data */
999
	case PACKET3_3D_DRAW_IMMD_2:
1000
		/* triggers drawing using in-packet vertex data */
1001
	case PACKET3_3D_DRAW_VBUF_2:
1002
		/* triggers drawing of vertex buffers setup elsewhere */
1003
	case PACKET3_3D_DRAW_INDX_2:
1004
		/* triggers drawing using indices to vertex buffer */
1005
	case PACKET3_3D_DRAW_VBUF:
1006
		/* triggers drawing of vertex buffers setup elsewhere */
1007
	case PACKET3_3D_DRAW_INDX:
1008
		/* triggers drawing using indices to vertex buffer */
1009
	case PACKET3_NOP:
1010
		break;
1011
	default:
1012
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1013
		return -EINVAL;
1014
	}
1015
	return 0;
1016
}
1017
 
1018
int r100_cs_parse(struct radeon_cs_parser *p)
1019
{
1020
	struct radeon_cs_packet pkt;
1021
	int r;
1022
 
1023
	do {
1024
		r = r100_cs_packet_parse(p, &pkt, p->idx);
1025
		if (r) {
1026
			return r;
1027
		}
1028
		p->idx += pkt.count + 2;
1029
		switch (pkt.type) {
1030
			case PACKET_TYPE0:
1031
				r = r100_packet0_check(p, &pkt);
1032
				break;
1033
			case PACKET_TYPE2:
1034
				break;
1035
			case PACKET_TYPE3:
1036
				r = r100_packet3_check(p, &pkt);
1037
				break;
1038
			default:
1039
				DRM_ERROR("Unknown packet type %d !\n",
1040
					  pkt.type);
1041
				return -EINVAL;
1042
		}
1043
		if (r) {
1044
			return r;
1045
		}
1046
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1047
	return 0;
1048
}
1049
 
1050
 
1051
/*
1052
 * Global GPU functions
1053
 */
1054
void r100_errata(struct radeon_device *rdev)
1055
{
1056
	rdev->pll_errata = 0;
1057
 
1058
	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
1059
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
1060
	}
1061
 
1062
	if (rdev->family == CHIP_RV100 ||
1063
	    rdev->family == CHIP_RS100 ||
1064
	    rdev->family == CHIP_RS200) {
1065
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
1066
	}
1067
}
1068
 
1069
#endif
1070
 
1071
 
1072
/* Wait for vertical sync on primary CRTC */
1073
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
1074
{
1075
	uint32_t crtc_gen_cntl, tmp;
1076
	int i;
1077
 
1078
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
1079
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
1080
	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
1081
		return;
1082
	}
1083
	/* Clear the CRTC_VBLANK_SAVE bit */
1084
	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
1085
	for (i = 0; i < rdev->usec_timeout; i++) {
1086
		tmp = RREG32(RADEON_CRTC_STATUS);
1087
		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
1088
			return;
1089
		}
1090
		DRM_UDELAY(1);
1091
	}
1092
}
1093
 
1094
/* Wait for vertical sync on secondary CRTC */
1095
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
1096
{
1097
	uint32_t crtc2_gen_cntl, tmp;
1098
	int i;
1099
 
1100
	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1101
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
1102
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
1103
		return;
1104
 
1105
	/* Clear the CRTC_VBLANK_SAVE bit */
1106
	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
1107
	for (i = 0; i < rdev->usec_timeout; i++) {
1108
		tmp = RREG32(RADEON_CRTC2_STATUS);
1109
		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
1110
			return;
1111
		}
1112
		DRM_UDELAY(1);
1113
	}
1114
}
1115
 
1116
int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
1117
{
1118
	unsigned i;
1119
	uint32_t tmp;
1120
 
1121
	for (i = 0; i < rdev->usec_timeout; i++) {
1122
		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
1123
		if (tmp >= n) {
1124
			return 0;
1125
		}
1126
		DRM_UDELAY(1);
1127
	}
1128
	return -1;
1129
}
1130
 
1131
int r100_gui_wait_for_idle(struct radeon_device *rdev)
1132
{
1133
	unsigned i;
1134
	uint32_t tmp;
1135
 
1136
	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
1137
		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
1138
		       " Bad things might happen.\n");
1139
	}
1140
	for (i = 0; i < rdev->usec_timeout; i++) {
1141
		tmp = RREG32(RADEON_RBBM_STATUS);
1142
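		/* bit 31 of RBBM_STATUS is, as far as I can tell, the
		 * "GUI active" flag, so the GUI is idle once it clears */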
		if (!(tmp & (1 << 31))) {
1143
			return 0;
1144
		}
1145
		DRM_UDELAY(1);
1146
	}
1147
	return -1;
1148
}
1149
 
1150
int r100_mc_wait_for_idle(struct radeon_device *rdev)
1151
{
1152
	unsigned i;
1153
	uint32_t tmp;
1154
 
1155
	for (i = 0; i < rdev->usec_timeout; i++) {
1156
		/* read MC_STATUS */
1157
		tmp = RREG32(0x0150);
1158
		if (tmp & (1 << 2)) {
1159
			return 0;
1160
		}
1161
		DRM_UDELAY(1);
1162
	}
1163
	return -1;
1164
}
1165
 
1166
void r100_gpu_init(struct radeon_device *rdev)
1167
{
1168
	/* TODO: anything to do here? pipes? */
1169
	r100_hdp_reset(rdev);
1170
}
1171
 
1172
void r100_hdp_reset(struct radeon_device *rdev)
1173
{
1174
	uint32_t tmp;
1175
 
1120 serge 1176
    dbgprintf("%s\n",__FUNCTION__);
1117 serge 1177
 
1178
	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
1179
	tmp |= (7 << 28);
1180
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
1181
	(void)RREG32(RADEON_HOST_PATH_CNTL);
1182
	udelay(200);
1183
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
1184
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
1185
	(void)RREG32(RADEON_HOST_PATH_CNTL);
1186
}
1187
 
1188
int r100_rb2d_reset(struct radeon_device *rdev)
1189
{
1190
	uint32_t tmp;
1191
	int i;
1192
 
1120 serge 1193
    dbgprintf("%s\n",__FUNCTION__);
1117 serge 1194
 
1195
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
1196
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
1197
	udelay(200);
1198
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
1199
	/* Wait to prevent race in RBBM_STATUS */
1200
	mdelay(1);
1201
	for (i = 0; i < rdev->usec_timeout; i++) {
1202
		tmp = RREG32(RADEON_RBBM_STATUS);
1203
		if (!(tmp & (1 << 26))) {
1204
			DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
1205
				 tmp);
1206
			return 0;
1207
		}
1208
		DRM_UDELAY(1);
1209
	}
1210
	tmp = RREG32(RADEON_RBBM_STATUS);
1211
	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
1212
	return -1;
1213
}
1214
 
1215
#if 0
1216
 
1217
int r100_gpu_reset(struct radeon_device *rdev)
1218
{
1219
	uint32_t status;
1220
 
1221
	/* reset order likely matters */
1222
	status = RREG32(RADEON_RBBM_STATUS);
1223
	/* reset HDP */
1224
	r100_hdp_reset(rdev);
1225
	/* reset rb2d */
1226
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
1227
		r100_rb2d_reset(rdev);
1228
	}
1229
	/* TODO: reset 3D engine */
1230
	/* reset CP */
1231
	status = RREG32(RADEON_RBBM_STATUS);
1232
	if (status & (1 << 16)) {
1233
		r100_cp_reset(rdev);
1234
	}
1235
	/* Check if GPU is idle */
1236
	status = RREG32(RADEON_RBBM_STATUS);
1237
	if (status & (1 << 31)) {
1238
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
1239
		return -1;
1240
	}
1241
	DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
1242
	return 0;
1243
}
1244
 
1245
 
1246
/*
1247
 * VRAM info
1248
 */
1249
static void r100_vram_get_type(struct radeon_device *rdev)
1250
{
1251
	uint32_t tmp;
1252
 
1253
	rdev->mc.vram_is_ddr = false;
1254
	if (rdev->flags & RADEON_IS_IGP)
1255
		rdev->mc.vram_is_ddr = true;
1256
	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
1257
		rdev->mc.vram_is_ddr = true;
1258
	if ((rdev->family == CHIP_RV100) ||
1259
	    (rdev->family == CHIP_RS100) ||
1260
	    (rdev->family == CHIP_RS200)) {
1261
		tmp = RREG32(RADEON_MEM_CNTL);
1262
		if (tmp & RV100_HALF_MODE) {
1263
			rdev->mc.vram_width = 32;
1264
		} else {
1265
			rdev->mc.vram_width = 64;
1266
		}
1267
		if (rdev->flags & RADEON_SINGLE_CRTC) {
1268
			rdev->mc.vram_width /= 4;
1269
			rdev->mc.vram_is_ddr = true;
1270
		}
1271
	} else if (rdev->family <= CHIP_RV280) {
1272
		tmp = RREG32(RADEON_MEM_CNTL);
1273
		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
1274
			rdev->mc.vram_width = 128;
1275
		} else {
1276
			rdev->mc.vram_width = 64;
1277
		}
1278
	} else {
1279
		/* newer IGPs */
1280
		rdev->mc.vram_width = 128;
1281
	}
1282
}
1283
 
1284
void r100_vram_info(struct radeon_device *rdev)
1285
{
1286
	r100_vram_get_type(rdev);
1287
 
1288
	if (rdev->flags & RADEON_IS_IGP) {
1289
		uint32_t tom;
1290
		/* read NB_TOM to get the amount of ram stolen for the GPU */
1291
		tom = RREG32(RADEON_NB_TOM);
1292
		rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
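		/* NB_TOM gives the stolen-memory window in 64 KiB units
		 * (low word = start, high word = end), so the size is
		 * (end - start + 1) * 64 KiB */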
1293
		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
1294
	} else {
1295
		rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
1296
		/* Some production boards of m6 will report 0
1297
		 * if it's 8 MB
1298
		 */
1299
		if (rdev->mc.vram_size == 0) {
1300
			rdev->mc.vram_size = 8192 * 1024;
1301
			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
1302
		}
1303
	}
1304
 
1305
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1306
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1307
}
1308
 
1119 serge 1309
#endif
1117 serge 1310
 
1311
/*
1312
 * Indirect registers accessor
1313
 */
1314
void r100_pll_errata_after_index(struct radeon_device *rdev)
1315
{
1316
	if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
1317
		return;
1318
	}
1319
	(void)RREG32(RADEON_CLOCK_CNTL_DATA);
1320
	(void)RREG32(RADEON_CRTC_GEN_CNTL);
1321
}
1322
 
1323
static void r100_pll_errata_after_data(struct radeon_device *rdev)
1324
{
1325
	/* This workaround is necessary on RV100, RS100 and RS200 chips,
	 * or the chip could hang on a subsequent access.
	 */
1328
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
1329
		udelay(5000);
1330
	}
1331
 
1332
	/* This function is required to work around a hardware bug in some (all?)
1333
	 * revisions of the R300.  This workaround should be called after every
1334
	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
1335
	 * may not be correct.
1336
	 */
1337
	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
1338
		uint32_t save, tmp;
1339
 
1340
		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
1341
		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
1342
		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
1343
		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
1344
		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
1345
	}
1346
}
1347
 
1348
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
1349
{
1350
	uint32_t data;
1351
 
1352
	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
1353
	r100_pll_errata_after_index(rdev);
1354
	data = RREG32(RADEON_CLOCK_CNTL_DATA);
1355
	r100_pll_errata_after_data(rdev);
1356
	return data;
1357
}
1358
 
1359
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1360
{
1361
	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
1362
	r100_pll_errata_after_index(rdev);
1363
	WREG32(RADEON_CLOCK_CNTL_DATA, v);
1364
	r100_pll_errata_after_data(rdev);
1365
}
1366
 
1367
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
1368
{
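	/* registers within the first 64 KiB of the MMIO aperture are read
	 * directly; anything beyond is reached indirectly through the
	 * MM_INDEX/MM_DATA pair */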
1369
	if (reg < 0x10000)
1370
		return readl(((void __iomem *)rdev->rmmio) + reg);
1371
	else {
1372
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1373
		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1374
	}
1375
}
1376
 
1377
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1378
{
1379
	if (reg < 0x10000)
1380
		writel(v, ((void __iomem *)rdev->rmmio) + reg);
1381
	else {
1382
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1383
		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1384
	}
1385
}
1386
 
1387
int r100_init(struct radeon_device *rdev)
1388
{
1389
	return 0;
1390
}
1391