/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include 
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_microcode.h"
#include "radeon_reg.h"
#include "radeon.h"

/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 *
 * Some of these functions might be used by newer ASICs.
 */
void r100_hdp_reset(struct radeon_device *rdev);
void r100_gpu_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_mc_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here ? */
	/* It seems hw only caches one entry, so we should discard this
	 * entry; otherwise, if the first GPU GART read hits this entry it
	 * could end up at the wrong address. */
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->gart.table.ram.ptr == NULL) {
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
		r = radeon_gart_table_ram_alloc(rdev);
		if (r) {
			return r;
		}
	}
	/* discard memory requests outside of the configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translation */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32(RADEON_AIC_HI_ADDR, tmp);
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory requests outside of the configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}


int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	/* index must stay within the table (>= catches the off-by-one) */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
	return 0;
}
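
/* Illustrative sketch (not part of the driver): one way a caller could fill a
 * run of PCI GART entries with r100_pci_gart_set_page() and then flush the
 * single-entry TLB.  The helper name and the dma_addr_t page array are
 * hypothetical; the block is guarded by #if 0 so it is never built.
 */
#if 0
static int r100_pci_gart_bind_sketch(struct radeon_device *rdev,
				     unsigned first_page, unsigned num_pages,
				     dma_addr_t *pages)
{
	unsigned i;
	int r;

	for (i = 0; i < num_pages; i++) {
		/* each table entry is a 32-bit bus address, stored LE */
		r = r100_pci_gart_set_page(rdev, first_page + i, pages[i]);
		if (r)
			return r;
	}
	/* hw caches only one entry, so discard it after rewriting the table */
	r100_pci_gart_tlb_flush(rdev);
	return 0;
}
#endif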

int r100_gart_enable(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_AGP) {
		r100_pci_gart_disable(rdev);
		return 0;
	}
	return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
void r100_mc_disable_clients(struct radeon_device *rdev)
{
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;

	/* FIXME: is this function correct for rs100,rs200,rs300 ? */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* stop display and memory access */
	ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
	WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);

	r100_gpu_wait_for_vsync(rdev);

	WREG32(RADEON_CRTC_GEN_CNTL,
	       (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
	       RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);

	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);

		r100_gpu_wait_for_vsync2(rdev);
		WREG32(RADEON_CRTC2_GEN_CNTL,
		       (crtc2_gen_cntl &
		        ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
		       RADEON_CRTC2_DISP_REQ_EN_B);
	}

	udelay(500);
}

void r100_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

//   r = r100_debugfs_mc_info_init(rdev);
//   if (r) {
//       DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
//   }
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(RADEON_MC_FB_LOCATION, tmp);

	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);

	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(RADEON_AGP_BASE, 0);
	}

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}
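
/* Worked example for the MC_FB_LOCATION packing above (illustrative numbers):
 * both fields are in 64 KiB units, so with vram_location = 0 and
 * vram_size = 64 MiB the top address is 0x03FFFFFF, giving
 * MC_FB_TOP = 0x03FFFFFF >> 16 = 0x03FF and MC_FB_START = 0 >> 16 = 0x0000.
 */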

int r100_mc_init(struct radeon_device *rdev)
{
	int r;

//   if (r100_debugfs_rbbm_init(rdev)) {
//       DRM_ERROR("Failed to register debugfs file for RBBM !\n");
//   }

	r100_gpu_init(rdev);
	/* Disable gart, which also disables out-of-gart access */
	r100_pci_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	r100_mc_disable_clients(rdev);
	if (r100_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	r100_mc_setup(rdev);
	return 0;
}

void r100_mc_fini(struct radeon_device *rdev)
{
	r100_pci_gart_disable(rdev);
//   radeon_gart_table_ram_free(rdev);
//   radeon_gart_fini(rdev);
}

/*
 * Fence emission
 */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

#if 0
/*
 * Writeback
 */
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, 4096,
					 true,
					 RADEON_GEM_DOMAIN_GTT,
					 false, &rdev->wb.wb_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj,
				      RADEON_GEM_DOMAIN_GTT,
				      &rdev->wb.gpu_addr);
		if (r) {
			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
			return r;
		}
	}
	WREG32(0x774, rdev->wb.gpu_addr);
	WREG32(0x70C, rdev->wb.gpu_addr + 1024);
	WREG32(0x770, 0xff);
	return 0;
}

void r100_wb_fini(struct radeon_device *rdev)
{
	if (rdev->wb.wb_obj) {
//       radeon_object_kunmap(rdev->wb.wb_obj);
//       radeon_object_unpin(rdev->wb.wb_obj);
//       radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}


int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon is limited to a 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is in units of 64 bytes */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;

		/* pages are in Y direction - height
		   page width in X direction - width */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}
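
/* Worked example for the blit parameters above (illustrative numbers): with
 * stride_bytes = PAGE_SIZE = 4096, pitch = 4096 / 64 = 64 (the hardware pitch
 * is in 64-byte units) and stride_pixels = 4096 / 4 = 1024 ARGB8888 pixels per
 * line.  Each loop moves at most 8191 pages, so an 8192-page (32 MiB) move
 * needs num_loops = 2 and reserves ndw = 64 + 10 * 2 = 84 ring dwords.
 */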

#endif

/*
 * CP
 */
void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 2);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	int i;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	WREG32(RADEON_CP_ME_RAM_ADDR, 0);
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
		}
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
		}
	}
}

int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	dbgprintf("%s\n",__FUNCTION__);

//   if (r100_debugfs_cp_init(rdev)) {
//       DRM_ERROR("Failed to register debugfs file for CP !\n");
//   }
	/* Reset CP */
	tmp = RREG32(RADEON_CP_CSQ_STAT);
	if ((tmp & (1 << 31))) {
		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		tmp = RREG32(RADEON_CP_CSQ_STAT);
		if ((tmp & (1 << 31))) {
			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
		}
	} else {
		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
	}
	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the CP reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128bytes at a time (4 dwords) */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 *    indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 *    indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	WREG32(RADEON_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       RADEON_BUF_SWAP_32BIT |
#endif
	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 */
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x718, 0);
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}
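
/* Worked example for the ring size alignment above (illustrative numbers,
 * assuming drm_order() returns the ceiling of log2 as in the DRM helpers):
 * asking for a 1 MiB ring gives rb_bufsz = drm_order(1048576 / 8) = 17, so
 * ring_size is rounded to (1 << 18) * 4 bytes = 1 MiB; a 1.5 MiB request
 * rounds up to rb_bufsz = 18, i.e. a 2 MiB ring.
 */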

void r100_cp_fini(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

int r100_cp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	dbgprintf("%s\n",__FUNCTION__);

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 16))) {
			DRM_INFO("CP reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

#if 0
/*
 * CS functions
 */
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header = ib_chunk->kdata[idx];

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	volatile uint32_t *ib;
	uint32_t tmp;
	unsigned reg;
	unsigned i;
	unsigned idx;
	bool onereg;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	reg = pkt->reg;
	onereg = false;
	if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
		onereg = true;
	}
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		switch (reg) {
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
		case RADEON_DST_PITCH_OFFSET:
		case RADEON_SRC_PITCH_OFFSET:
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			tmp = ib_chunk->kdata[idx] & 0x003fffff;
			tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
			ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
			break;
		case RADEON_RB3D_DEPTHOFFSET:
		case RADEON_RB3D_COLOROFFSET:
		case R300_RB3D_COLOROFFSET0:
		case R300_ZB_DEPTHOFFSET:
		case R200_PP_TXOFFSET_0:
		case R200_PP_TXOFFSET_1:
		case R200_PP_TXOFFSET_2:
		case R200_PP_TXOFFSET_3:
		case R200_PP_TXOFFSET_4:
		case R200_PP_TXOFFSET_5:
		case RADEON_PP_TXOFFSET_0:
		case RADEON_PP_TXOFFSET_1:
		case RADEON_PP_TXOFFSET_2:
		case R300_TX_OFFSET_0:
		case R300_TX_OFFSET_0+4:
		case R300_TX_OFFSET_0+8:
		case R300_TX_OFFSET_0+12:
		case R300_TX_OFFSET_0+16:
		case R300_TX_OFFSET_0+20:
		case R300_TX_OFFSET_0+24:
		case R300_TX_OFFSET_0+28:
		case R300_TX_OFFSET_0+32:
		case R300_TX_OFFSET_0+36:
		case R300_TX_OFFSET_0+40:
		case R300_TX_OFFSET_0+44:
		case R300_TX_OFFSET_0+48:
		case R300_TX_OFFSET_0+52:
		case R300_TX_OFFSET_0+56:
		case R300_TX_OFFSET_0+60:
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
			break;
		default:
			/* FIXME: we don't want to allow any other packets */
			break;
		}
		if (onereg) {
			/* FIXME: forbid onereg write to register on relocate */
			break;
		}
	}
	return 0;
}

int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_object *robj)
{
	struct radeon_cs_chunk *ib_chunk;
	unsigned idx;

	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  ib_chunk->kdata[idx+2] + 1,
			  radeon_object_size(robj));
		return -EINVAL;
	}
	return 0;
}

static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx;
	unsigned i, c;
	volatile uint32_t *ib;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		c = ib_chunk->kdata[idx++];
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
		}
		if (c & 1) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	case 0x23:
		/* FIXME: cleanup */
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case PACKET3_3D_DRAW_IMMD:
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
			case PACKET_TYPE0:
				r = r100_packet0_check(p, &pkt);
				break;
			case PACKET_TYPE2:
				break;
			case PACKET_TYPE3:
				r = r100_packet3_check(p, &pkt);
				break;
			default:
				DRM_ERROR("Unknown packet type %d !\n",
					  pkt.type);
				return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}

#endif

/*
 * Global GPU functions
 */
void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}


/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
{
	uint32_t crtc_gen_cntl, tmp;
	int i;

	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
		return;
	}
	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC_STATUS);
		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

/* Wait for vertical sync on secondary CRTC */
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
{
	uint32_t crtc2_gen_cntl, tmp;
	int i;

	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
		return;

	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC2_STATUS);
		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
		if (tmp >= n) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_gui_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
		       " Bad things might happen.\n");
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 31))) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		if (tmp & (1 << 2)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r100_gpu_init(struct radeon_device *rdev)
{
	/* TODO: anything to do here ? pipes ? */
	r100_hdp_reset(rdev);
}

void r100_hdp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;

	dbgprintf("%s\n",__FUNCTION__);

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}

int r100_rb2d_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	dbgprintf("%s\n",__FUNCTION__);

	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 26))) {
			DRM_INFO("RB2D reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

int r100_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* TODO: reset 3D engine */
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * VRAM info
 */
static void r100_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_is_ddr = false;
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.vram_is_ddr = true;
	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
		rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RV100_HALF_MODE) {
			rdev->mc.vram_width = 32;
		} else {
			rdev->mc.vram_width = 64;
		}
		if (rdev->flags & RADEON_SINGLE_CRTC) {
			rdev->mc.vram_width /= 4;
			rdev->mc.vram_is_ddr = true;
		}
	} else if (rdev->family <= CHIP_RV280) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
			rdev->mc.vram_width = 128;
		} else {
			rdev->mc.vram_width = 64;
		}
	} else {
		/* newer IGPs */
		rdev->mc.vram_width = 128;
	}
}

void r100_vram_info(struct radeon_device *rdev)
{
	r100_vram_get_type(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		uint32_t tom;
		/* read NB_TOM to get the amount of ram stolen for the GPU */
		tom = RREG32(RADEON_NB_TOM);
		rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	} else {
		rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
		/* Some production boards of m6 will report 0
		 * if it's 8 MB
		 */
		if (rdev->mc.vram_size == 0) {
			rdev->mc.vram_size = 8192 * 1024;
			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
		}
	}

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
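
/* Worked example for the NB_TOM decoding above (illustrative numbers): the
 * register holds the stolen-memory range in 64 KiB units, bottom in bits 15:0
 * and top in bits 31:16, so tom = 0x03FF0000 means bottom 0 and top 0x03FF,
 * giving vram_size = (0x03FF - 0 + 1) << 16 = 64 MiB.
 */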

/*
 * Indirect registers accessor
 */
void r100_pll_errata_after_index(struct radeon_device *rdev)
{
	if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
		return;
	}
	(void)RREG32(RADEON_CLOCK_CNTL_DATA);
	(void)RREG32(RADEON_CRTC_GEN_CNTL);
}

static void r100_pll_errata_after_data(struct radeon_device *rdev)
{
	/* This workaround is necessary on RV100, RS100 and RS200 chips,
	 * or the chip could hang on a subsequent access
	 */
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
		udelay(5000);
	}

	/* This function is required to workaround a hardware bug in some (all?)
	 * revisions of the R300.  This workaround should be called after every
	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
	 * may not be correct.
	 */
	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
		uint32_t save, tmp;

		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
	}
}

uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t data;

	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
	r100_pll_errata_after_index(rdev);
	data = RREG32(RADEON_CLOCK_CNTL_DATA);
	r100_pll_errata_after_data(rdev);
	return data;
}

void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
	r100_pll_errata_after_index(rdev);
	WREG32(RADEON_CLOCK_CNTL_DATA, v);
	r100_pll_errata_after_data(rdev);
}

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
	if (reg < 0x10000)
		return readl(((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}

void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	if (reg < 0x10000)
		writel(v, ((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}

int r100_init(struct radeon_device *rdev)
{
	return 0;
}
1379