Subversion Repositories: Kolibri OS

Rev 1963 → Rev 1986
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include 
#include 
#include 
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"

/* This file gathers functions specific to rs400, rs480 */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

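/* Clamp the requested GART size to one of the apertures the IGP hardware
 * accepts (32M through 2G); anything else is rejected and forced to 32M. */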
void rs400_gart_adjust_size(struct radeon_device *rdev)
{
	/* Check gart size */
	switch (rdev->mc.gtt_size/(1024*1024)) {
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_ERROR("Unable to use IGP GART size %uM\n",
			  (unsigned)(rdev->mc.gtt_size >> 20));
		DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
		DRM_ERROR("Forcing to 32M GART size\n");
		rdev->mc.gtt_size = 32 * 1024 * 1024;
		return;
	}
}

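/* Ask the GART cache to invalidate itself, then poll (up to usec_timeout
 * iterations, 1us apart) until the hardware clears the invalidate bit. */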
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	unsigned int timeout = rdev->usec_timeout;

	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
	do {
		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
			break;
		DRM_UDELAY(1);
		timeout--;
	} while (timeout > 0);
	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}

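/* One-time GART setup: validate the aperture size, initialize the common
 * GART structure and allocate the page table in system RAM (one 32-bit
 * entry per GPU page). */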
int rs400_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.ram.ptr) {
		WARN(1, "RS400 GART already initialized\n");
		return 0;
	}
	/* Check gart size */
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		return -EINVAL;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	if (rs400_debugfs_pcie_gart_info_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	return radeon_gart_table_ram_alloc(rdev);
}

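/* Program and enable the GART: pick the aperture size register value, set
 * up the AGP base/location registers (RS690/RS740 use the MCCFG_* variants),
 * point RS480_GART_BASE at the page table, disable snooping and AGP mode,
 * then turn the GART on and flush its TLB. */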
int rs400_gart_enable(struct radeon_device *rdev)
{
	uint32_t size_reg;
	uint32_t tmp;

	radeon_gart_restore(rdev);
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	/* Check gart size */
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
		size_reg = RS480_VA_SIZE_32MB;
		break;
	case 64:
		size_reg = RS480_VA_SIZE_64MB;
		break;
	case 128:
		size_reg = RS480_VA_SIZE_128MB;
		break;
	case 256:
		size_reg = RS480_VA_SIZE_256MB;
		break;
	case 512:
		size_reg = RS480_VA_SIZE_512MB;
		break;
	case 1024:
		size_reg = RS480_VA_SIZE_1GB;
		break;
	case 2048:
		size_reg = RS480_VA_SIZE_2GB;
		break;
	default:
		return -EINVAL;
	}
	/* It should be fine to program it to max value */
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
	} else {
		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
		WREG32(RS480_AGP_BASE_2, 0);
	}
	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	}
	/* Table should be in 32-bit address space, so ignore bits above. */
	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;

	WREG32_MC(RS480_GART_BASE, tmp);
	/* TODO: more tweaking here */
	WREG32_MC(RS480_GART_FEATURE_ID,
		  (RS480_TLB_ENABLE |
		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
	/* Disable snooping */
	WREG32_MC(RS480_AGP_MODE_CNTL,
		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
	/* Disable AGP mode */
	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS480_MC_MISC_CNTL,
			  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
	} else {
		WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
	}
	/* Enable gart */
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
	rs400_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

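/* Disable the GART: keep out-of-GART accesses blocked and clear the
 * aperture size/enable register. */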
void rs400_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

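/* Tear down the GART: release the common structures, disable the aperture
 * and free the page table allocated in system RAM. */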
void rs400_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs400_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

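/* A GART page table entry holds the page-aligned low 32 bits of the address,
 * with address bits 39:32 folded into entry bits 11:4, plus the two access
 * bits below; entries are stored little-endian. */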
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE  (1 << 3)

int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	uint32_t entry;

	if (i < 0 || i > rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}

	entry = (lower_32_bits(addr) & PAGE_MASK) |
		((upper_32_bits(addr) & 0xff) << 4) |
		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
	entry = cpu_to_le32(entry);
	rdev->gart.table.ram.ptr[i] = entry;
	return 0;
}

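/* Poll RADEON_MC_STATUS until the memory controller reports idle, or give
 * up after usec_timeout microseconds (returns -1 on timeout). */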
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & RADEON_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

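/* Basic GPU setup: program the pipe configuration (RS4xx reuses the R420
 * pipe setup code) and wait for the memory controller to go idle. */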
void rs400_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: is this correct ? */
	r420_pipes_init(rdev);
	if (rs400_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
		       "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
	}
}

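/* Work out the IGP memory layout: the VRAM base comes from RADEON_NB_TOM
 * (memory taken from system RAM), then the VRAM and GTT apertures are placed
 * by the common helpers. */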
void rs400_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rs400_gart_adjust_size(rdev);
	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	r100_vram_init_sizes(rdev);
	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

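/* MC registers on these IGPs are reached indirectly: write the register
 * index to RS480_NB_MC_INDEX (with the write-enable bit for stores), then
 * access RS480_NB_MC_DATA. */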
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RS480_NB_MC_INDEX, reg & 0xff);
	r = RREG32(RS480_NB_MC_DATA);
	WREG32(RS480_NB_MC_INDEX, 0xff);
	return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
	WREG32(RS480_NB_MC_DATA, (v));
	WREG32(RS480_NB_MC_INDEX, 0xff);
}

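/* debugfs helper (only built with CONFIG_DEBUG_FS): dump the GART/AGP
 * related registers so the aperture setup can be inspected. */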
#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
		tmp = RREG32(RS690_HDP_FB_LOCATION);
		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
	} else {
		tmp = RREG32(RADEON_AGP_BASE);
		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32(RS480_AGP_BASE_2);
		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32(RADEON_MC_AGP_LOCATION);
		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	}
	tmp = RREG32_MC(RS480_GART_BASE);
	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(0x5F);
	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3B);
	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3C);
	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
	tmp = RREG32_MC(0x30);
	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
	tmp = RREG32_MC(0x31);
	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
	tmp = RREG32_MC(0x32);
	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
	tmp = RREG32_MC(0x33);
	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
	tmp = RREG32_MC(0x34);
	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
	tmp = RREG32_MC(0x35);
	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
	tmp = RREG32_MC(0x36);
	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
	tmp = RREG32_MC(0x37);
	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

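/* Register the debugfs entry above; a no-op when debugfs is not built in. */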
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
	return 0;
#endif
}

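/* Reprogram the MC_FB_LOCATION window to match the VRAM placement chosen in
 * rs400_mc_init(), with all MC clients stopped while the change is made. */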
void rs400_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stops all mc clients */
	r100_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs400_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
	WREG32(R_000148_MC_FB_LOCATION,
		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));

	r100_mc_resume(rdev, &save);
}

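/* Bring the hardware up: program the MC, restart the clocks, configure the
 * pipes, enable bus mastering and the GART, then start the CP with a 1M
 * ring buffer. IRQ and IB setup are still commented out in this port. */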
static int rs400_startup(struct radeon_device *rdev)
{
	int r;

	r100_set_common_regs(rdev);

	rs400_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs400_gpu_init(rdev);
	r100_enable_bm(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs400_gart_enable(rdev);
	if (r)
		return r;
	/* Enable IRQ */
//	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
//	r = r100_ib_init(rdev);
//	if (r) {
//		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
//		return r;
//	}
	return 0;
}

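/* One-time driver init for RS400/RS480: read the combios, reset and post the
 * card if needed, set up clocks, the memory controller, the memory manager
 * and the GART, then try to start acceleration; on failure acceleration is
 * disabled and the GART is torn down again. */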
int rs400_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs400_mc_init(rdev);
	/* Fence driver */
//	r = radeon_fence_driver_init(rdev);
//	if (r)
//		return r;
//	r = radeon_irq_kms_init(rdev);
//	if (r)
//		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs400_gart_init(rdev);
	if (r)
		return r;
	r300_set_reg_safe(rdev);
	rdev->accel_working = true;
	r = rs400_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init; stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
//		r100_cp_fini(rdev);
//		r100_wb_fini(rdev);
//		r100_ib_fini(rdev);
		rs400_gart_fini(rdev);
//		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}