Subversion Repositories Kolibri OS

Diff of Rev 1179 against Rev 1221 (lines prefixed with "-" exist only in Rev 1179, lines prefixed with "+" only in Rev 1221).

Line 24... Line 24...

  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
  */
 #include "drmP.h"
-#include "radeon_reg.h"
 #include "radeon.h"
-#include "rs690r.h"
 #include "atom.h"
-#include "atom-bits.h"
+#include "rs690d.h"
 
-/* rs690,rs740 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-void r420_pipes_init(struct radeon_device *rdev);
-void rs400_gart_disable(struct radeon_device *rdev);
-int rs400_gart_enable(struct radeon_device *rdev);
-void rs400_gart_adjust_size(struct radeon_device *rdev);
-void rs600_mc_disable_clients(struct radeon_device *rdev);
-void rs600_disable_vga(struct radeon_device *rdev);
-
-/* This files gather functions specifics to :
- * rs690,rs740
- *
- * Some of these functions might be used by newer ASICs.
- */
-void rs690_gpu_init(struct radeon_device *rdev);
-int rs690_mc_wait_for_idle(struct radeon_device *rdev);
-
-
-/*
- * MC functions.
- */
-int rs690_mc_init(struct radeon_device *rdev)
-{
-	uint32_t tmp;
-	int r;
-
-	if (r100_debugfs_rbbm_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
-	}
-
-	rs690_gpu_init(rdev);
-	rs400_gart_disable(rdev);
-
-	/* Setup GPU memory space */
-	rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-	rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
-	rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
-	rdev->mc.vram_location = 0xFFFFFFFFUL;
-	r = radeon_mc_setup(rdev);
-	if (r) {
-		return r;
-	}
-
-	/* Program GPU memory space */
-	rs600_mc_disable_clients(rdev);
-	if (rs690_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen.\n");
-	}
-	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-	tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
-	tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
-	WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
-	/* FIXME: Does this reg exist on RS480,RS740 ? */
-	WREG32(0x310, rdev->mc.vram_location);
-	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
-	return 0;
-}
-
-void rs690_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Global GPU functions
- */
-int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
 {
 	unsigned i;
 	uint32_t tmp;
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		/* read MC_STATUS */
-		tmp = RREG32_MC(RS690_MC_STATUS);
-		if (tmp & RS690_MC_STATUS_IDLE) {
-			return 0;
-		}
-		DRM_UDELAY(1);
+		tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
+		if (G_000090_MC_SYSTEM_IDLE(tmp))
+			return 0;
+		udelay(1);
 	}
 	return -1;
 }
 
-void rs690_errata(struct radeon_device *rdev)
-{
-	rdev->pll_errata = 0;
-}
-
-void rs690_gpu_init(struct radeon_device *rdev)
+static void rs690_gpu_init(struct radeon_device *rdev)
 {
 	/* FIXME: HDP same place on rs690 ? */
 	r100_hdp_reset(rdev);
-	rs600_disable_vga(rdev);
 	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
 	if (rs690_mc_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait MC idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
 }
 
-
-/*
- * VRAM info.
- */
 void rs690_pm_info(struct radeon_device *rdev)
Line 249... Line 168...

 	u32 tmp;
 
 	/*
 	 * Line Buffer Setup
 	 * There is a single line buffer shared by both display controllers.
-	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
 	 * the display controllers.  The paritioning can either be done
 	 * manually or via one of four preset allocations specified in bits 1:0:
 	 *  0 - line buffer is divided in half and shared between crtc
 	 *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
 	 *  2 - D1 gets the whole buffer
 	 *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
-	 * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual
+	 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT controls switches to manual
 	 * allocation mode. In manual allocation mode, D1 always starts at 0,
 	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
 	 */
-	tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK;
-	tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE;
+	tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
+	tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
 	/* auto */
 	if (mode1 && mode2) {
 		if (mode1->hdisplay > mode2->hdisplay) {
 			if (mode1->hdisplay > 2560)
-				tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
 			else
-				tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
 		} else if (mode2->hdisplay > mode1->hdisplay) {
 			if (mode2->hdisplay > 2560)
-				tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
 			else
-				tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
 		} else
-			tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+			tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
 	} else if (mode1) {
-		tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY;
+		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
 	} else if (mode2) {
-		tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
 	}
-	WREG32(DC_LB_MEMORY_SPLIT, tmp);
+	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
 }
 
 struct rs690_watermark {
Line 486... Line 405...

 	 * Set display0/1 priority up in the memory controller for
 	 * modes if the user specifies HIGH for displaypriority
 	 * option.
 	 */
 	if (rdev->disp_priority == 2) {
-		tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER);
-		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
-		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
-		if (mode1)
-			tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
+		tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+		tmp &= C_000104_MC_DISP0R_INIT_LAT;
+		tmp &= C_000104_MC_DISP1R_INIT_LAT;
 		if (mode0)
-			tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
-		WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp);
+			tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
+		if (mode1)
+			tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
+		WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
 	}
 	rs690_line_buffer_adjust(rdev, mode0, mode1);
 
 	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
-		WREG32(DCP_CONTROL, 0);
+		WREG32(R_006C9C_DCP_CONTROL, 0);
 	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
-		WREG32(DCP_CONTROL, 2);
+		WREG32(R_006C9C_DCP_CONTROL, 2);
 
 	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
 	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
 
 	tmp = (wm0.lb_request_fifo_depth - 1);
 	tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
-	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
 
Line 560... Line 479...

 			priority_mark12.full = wm1.priority_mark.full;
 		if (rfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
-		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
+		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
+		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
 	} else if (mode0) {
 		if (rfixed_trunc(wm0.dbpp) > 64)
 			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
 		else
 			a.full = wm0.num_line_pair.full;
Line 590... Line 509...

 			priority_mark02.full = wm0.priority_mark.full;
 		if (rfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
 		if (wm0.priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark_max.full;
-		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
-		WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
-		WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
+		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
+		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
+			S_006D48_D2MODE_PRIORITY_A_OFF(1));
+		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
+			S_006D4C_D2MODE_PRIORITY_B_OFF(1));
 	} else {
 		if (rfixed_trunc(wm1.dbpp) > 64)
 			a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
 		else
 			a.full = wm1.num_line_pair.full;
Line 620... Line 541...

 			priority_mark12.full = wm1.priority_mark.full;
 		if (rfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
-		WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
-		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+		WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
+			S_006548_D1MODE_PRIORITY_A_OFF(1));
+		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
+			S_00654C_D1MODE_PRIORITY_B_OFF(1));
+		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
+		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
 	}
 }
 
-/*
- * Indirect registers accessor
- */
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
 	uint32_t r;
 
-	WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
-	r = RREG32(RS690_MC_DATA);
-	WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
+	r = RREG32(R_00007C_MC_DATA);
+	WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
 	return r;
 }
 
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
-	WREG32(RS690_MC_INDEX,
-	       RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
+	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
+		S_000078_MC_IND_WR_EN(1));
+	WREG32(R_00007C_MC_DATA, v);
+	WREG32(R_000078_MC_INDEX, 0x7F);
+}
+
+void rs690_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs690_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+	/* Program MC, should be a 32bits limited address space */
+	WREG32_MC(R_000100_MCCFG_FB_LOCATION,
+			S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+	rv515_mc_resume(rdev, &save);
+}
+
+static int rs690_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rs690_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs690_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs400_gart_enable(rdev);
+	if (r)
+		return r;
+	/* Enable IRQ */
+//	rdev->irq.sw_int = true;
+//	rs600_irq_set(rdev);
+	/* 1M ring buffer */
+//	r = r100_cp_init(rdev, 1024 * 1024);
+//	if (r) {
+//		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+//		return r;
+//	}
+//	r = r100_wb_init(rdev);
+//	if (r)
+//		dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
+//	r = r100_ib_init(rdev);
+//	if (r) {
+//		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+//		return r;
+//	}
+	return 0;
+}
+
+
+
+
+int rs690_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	rv515_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disable VGA need to use VGA request */
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_gpu_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (!radeon_card_posted(rdev) && rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Get vram informations */
+	rs690_vram_info(rdev);
+	/* Initialize memory controller (also test AGP) */
+	r = r420_mc_init(rdev);
+	if (r)
+		return r;
+	rv515_debugfs(rdev);
+	/* Fence driver */
+//	r = radeon_fence_driver_init(rdev);
+//	if (r)
+//		return r;
+//	r = radeon_irq_kms_init(rdev);
+//	if (r)
+//		return r;
+	/* Memory manager */
+	r = radeon_object_init(rdev);
+	if (r)
+		return r;
+	r = rs400_gart_init(rdev);
+	if (r)
+		return r;
+	rs600_set_safe_registers(rdev);
+	rdev->accel_working = true;
+	r = rs690_startup(rdev);
+	if (r) {
+		/* Somethings want wront with the accel init stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+//		rs690_suspend(rdev);
+//		r100_cp_fini(rdev);
+//		r100_wb_fini(rdev);
+//		r100_ib_fini(rdev);
+		rs400_gart_fini(rdev);
+//		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;