Subversion Repositories Kolibri OS


Rev 1986 → Rev 2005 (lines prefixed with '-' exist only in Rev 1986, lines prefixed with '+' only in Rev 2005; all unprefixed lines are identical in both revisions)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* The bracketed include targets were lost in the diff rendering; the three
 * headers below are the ones upstream rs400.c uses and may differ in the
 * KolibriOS port. */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"

/* This file gathers functions specific to rs400 and rs480 */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rs400_gart_adjust_size(struct radeon_device *rdev)
{
    /* Check gart size */
    switch (rdev->mc.gtt_size/(1024*1024)) {
    case 32:
    case 64:
    case 128:
    case 256:
    case 512:
    case 1024:
    case 2048:
        break;
    default:
        DRM_ERROR("Unable to use IGP GART size %uM\n",
              (unsigned)(rdev->mc.gtt_size >> 20));
        DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
        DRM_ERROR("Forcing to 32M GART size\n");
        rdev->mc.gtt_size = 32 * 1024 * 1024;
        return;
    }
}
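
/* Flush the RS4xx GART TLB: request a cache invalidate, poll until the
 * hardware clears the invalidate bit (bounded by the usec timeout), then
 * write the cache control register back to 0. */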
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
    uint32_t tmp;
    unsigned int timeout = rdev->usec_timeout;

    WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
    do {
        tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
        if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
            break;
        DRM_UDELAY(1);
        timeout--;
    } while (timeout > 0);
    WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}

int rs400_gart_init(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.table.ram.ptr) {
        WARN(1, "RS400 GART already initialized\n");
        return 0;
    }
    /* Check gart size */
    switch(rdev->mc.gtt_size / (1024 * 1024)) {
    case 32:
    case 64:
    case 128:
    case 256:
    case 512:
    case 1024:
    case 2048:
        break;
    default:
        return -EINVAL;
    }
    /* Initialize common gart structure */
    r = radeon_gart_init(rdev);
    if (r)
        return r;
    if (rs400_debugfs_pcie_gart_info_init(rdev))
        DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
    rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
    return radeon_gart_table_ram_alloc(rdev);
}
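
/* Bring up the on-chip GART: pick the aperture size register value that
 * matches mc.gtt_size, point the AGP aperture registers at the GTT range,
 * program the page-table base (32-bit address space only), disable snooping,
 * then enable the GART and flush its TLB. */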
int rs400_gart_enable(struct radeon_device *rdev)
{
    uint32_t size_reg;
    uint32_t tmp;

    radeon_gart_restore(rdev);
    tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
    tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
    WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
    /* Check gart size */
    switch(rdev->mc.gtt_size / (1024 * 1024)) {
    case 32:
        size_reg = RS480_VA_SIZE_32MB;
        break;
    case 64:
        size_reg = RS480_VA_SIZE_64MB;
        break;
    case 128:
        size_reg = RS480_VA_SIZE_128MB;
        break;
    case 256:
        size_reg = RS480_VA_SIZE_256MB;
        break;
    case 512:
        size_reg = RS480_VA_SIZE_512MB;
        break;
    case 1024:
        size_reg = RS480_VA_SIZE_1GB;
        break;
    case 2048:
        size_reg = RS480_VA_SIZE_2GB;
        break;
    default:
        return -EINVAL;
    }
    /* It should be fine to program it to max value */
    if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
        WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
        WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
    } else {
        WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
        WREG32(RS480_AGP_BASE_2, 0);
    }
    tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
    tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
    if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
        WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
        tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
        WREG32(RADEON_BUS_CNTL, tmp);
    } else {
        WREG32(RADEON_MC_AGP_LOCATION, tmp);
        tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
        WREG32(RADEON_BUS_CNTL, tmp);
    }
    /* Table should be in 32bits address space so ignore bits above. */
    tmp = (u32)rdev->gart.table_addr & 0xfffff000;
    tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;

    WREG32_MC(RS480_GART_BASE, tmp);
    /* TODO: more tweaking here */
    WREG32_MC(RS480_GART_FEATURE_ID,
          (RS480_TLB_ENABLE |
           RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
    /* Disable snooping */
    WREG32_MC(RS480_AGP_MODE_CNTL,
          (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
    /* Disable AGP mode */
    /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
     * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
    if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
        WREG32_MC(RS480_MC_MISC_CNTL,
              (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
    } else {
        WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
    }
    /* Enable gart */
    WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
    rs400_gart_tlb_flush(rdev);
    rdev->gart.ready = true;
    return 0;
}

void rs400_gart_disable(struct radeon_device *rdev)
{
    uint32_t tmp;

    tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
    tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
    WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
    WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

void rs400_gart_fini(struct radeon_device *rdev)
{
    radeon_gart_fini(rdev);
    rs400_gart_disable(rdev);
    radeon_gart_table_ram_free(rdev);
}

#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE  (1 << 3)
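
/* Each RS400 GART page-table entry packs the page-aligned low 32 bits of the
 * system address, bits 39:32 of that address in PTE bits 11:4, and the
 * per-page read/write enable flags; entries are stored little-endian. */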
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
    uint32_t entry;

    if (i < 0 || i > rdev->gart.num_gpu_pages) {
        return -EINVAL;
    }

    entry = (lower_32_bits(addr) & PAGE_MASK) |
        ((upper_32_bits(addr) & 0xff) << 4) |
        RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
    entry = cpu_to_le32(entry);
    rdev->gart.table.ram.ptr[i] = entry;
    return 0;
}

int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
    unsigned i;
    uint32_t tmp;

    for (i = 0; i < rdev->usec_timeout; i++) {
        /* read MC_STATUS */
        tmp = RREG32(RADEON_MC_STATUS);
        if (tmp & RADEON_MC_IDLE) {
            return 0;
        }
        DRM_UDELAY(1);
    }
    return -1;
}

void rs400_gpu_init(struct radeon_device *rdev)
{
    /* FIXME: is this correct ? */
    r420_pipes_init(rdev);
    if (rs400_mc_wait_for_idle(rdev)) {
        printk(KERN_WARNING "rs400: Failed to wait MC idle while "
               "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
    }
}

void rs400_mc_init(struct radeon_device *rdev)
{
    u64 base;

    rs400_gart_adjust_size(rdev);
    rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
    /* DDR for all card after R300 & IGP */
    rdev->mc.vram_is_ddr = true;
    rdev->mc.vram_width = 128;
    r100_vram_init_sizes(rdev);
    base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
    radeon_vram_location(rdev, &rdev->mc, base);
    rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
    radeon_gtt_location(rdev, &rdev->mc);
    radeon_update_bandwidth_info(rdev);
}
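
/* Northbridge MC registers on RS4xx are reached indirectly: write the
 * register index to RS480_NB_MC_INDEX (with RS480_NB_MC_IND_WR_EN set for
 * writes), access RS480_NB_MC_DATA, then restore the index to 0xff. */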
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
    uint32_t r;

    WREG32(RS480_NB_MC_INDEX, reg & 0xff);
    r = RREG32(RS480_NB_MC_DATA);
    WREG32(RS480_NB_MC_INDEX, 0xff);
    return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
    WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
    WREG32(RS480_NB_MC_DATA, (v));
    WREG32(RS480_NB_MC_INDEX, 0xff);
}

#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_device *dev = node->minor->dev;
    struct radeon_device *rdev = dev->dev_private;
    uint32_t tmp;

    tmp = RREG32(RADEON_HOST_PATH_CNTL);
    seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
    tmp = RREG32(RADEON_BUS_CNTL);
    seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
    tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
    seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
    if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
        tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
        seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
        seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
        seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
        seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
        tmp = RREG32(RS690_HDP_FB_LOCATION);
        seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
    } else {
        tmp = RREG32(RADEON_AGP_BASE);
        seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
        tmp = RREG32(RS480_AGP_BASE_2);
        seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
        tmp = RREG32(RADEON_MC_AGP_LOCATION);
        seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
    }
    tmp = RREG32_MC(RS480_GART_BASE);
    seq_printf(m, "GART_BASE 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_GART_FEATURE_ID);
    seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
    seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_MC_MISC_CNTL);
    seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
    tmp = RREG32_MC(0x5F);
    seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
    seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
    seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
    tmp = RREG32_MC(0x3B);
    seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
    tmp = RREG32_MC(0x3C);
    seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
    tmp = RREG32_MC(0x30);
    seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
    tmp = RREG32_MC(0x31);
    seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
    tmp = RREG32_MC(0x32);
    seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
    tmp = RREG32_MC(0x33);
    seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
    tmp = RREG32_MC(0x34);
    seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
    tmp = RREG32_MC(0x35);
    seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
    tmp = RREG32_MC(0x36);
    seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
    tmp = RREG32_MC(0x37);
    seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
    return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
    {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
    return 0;
#endif
}
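
/* Reprogram the VRAM window in the memory controller: stop the MC clients,
 * wait for the MC to go idle, write the new FB start/top, then resume. */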
void rs400_mc_program(struct radeon_device *rdev)
{
    struct r100_mc_save save;

    /* Stops all mc clients */
    r100_mc_stop(rdev, &save);

    /* Wait for mc idle */
    if (rs400_mc_wait_for_idle(rdev))
        dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
    WREG32(R_000148_MC_FB_LOCATION,
        S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
        S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));

    r100_mc_resume(rdev, &save);
}

static int rs400_startup(struct radeon_device *rdev)
{
    int r;

    r100_set_common_regs(rdev);

    rs400_mc_program(rdev);
    /* Resume clock */
    r300_clock_startup(rdev);
    /* Initialize GPU configuration (# pipes, ...) */
    rs400_gpu_init(rdev);
    r100_enable_bm(rdev);
    /* Initialize GART (initialize after TTM so we can allocate
     * memory through TTM but finalize after TTM) */
    r = rs400_gart_enable(rdev);
    if (r)
        return r;
+
+    /* allocate wb buffer */
+    r = radeon_wb_init(rdev);
+    if (r)
+        return r;
+
    /* Enable IRQ */
-//    r100_irq_set(rdev);
+    r100_irq_set(rdev);
    rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
    /* 1M ring buffer */
   r = r100_cp_init(rdev, 1024 * 1024);
   if (r) {
        dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
       return r;
   }
-//    r = r100_ib_init(rdev);
-//    if (r) {
-//        dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
-//        return r;
-//    }
+    r = r100_ib_init(rdev);
+    if (r) {
+        dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+        return r;
+    }
    return 0;
}



int rs400_init(struct radeon_device *rdev)
{
    int r;

    /* Disable VGA */
    r100_vga_render_disable(rdev);
    /* Initialize scratch registers */
    radeon_scratch_init(rdev);
    /* Initialize surface registers */
    radeon_surface_init(rdev);
    /* TODO: disable VGA need to use VGA request */
    /* restore some register to sane defaults */
    r100_restore_sanity(rdev);
    /* BIOS*/
    if (!radeon_get_bios(rdev)) {
        if (ASIC_IS_AVIVO(rdev))
            return -EINVAL;
    }
    if (rdev->is_atom_bios) {
        dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
        return -EINVAL;
    } else {
        r = radeon_combios_init(rdev);
        if (r)
            return r;
    }
    /* Reset gpu before posting otherwise ATOM will enter infinite loop */
    if (radeon_asic_reset(rdev)) {
        dev_warn(rdev->dev,
            "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
            RREG32(R_000E40_RBBM_STATUS),
            RREG32(R_0007C0_CP_STAT));
    }
    /* check if cards are posted or not */
    if (radeon_boot_test_post_card(rdev) == false)
        return -EINVAL;

    /* Initialize clocks */
    radeon_get_clock_info(rdev->ddev);
    /* initialize memory controller */
    rs400_mc_init(rdev);
    /* Fence driver */
-//    r = radeon_fence_driver_init(rdev);
-//    if (r)
-//        return r;
-//    r = radeon_irq_kms_init(rdev);
-//    if (r)
-//        return r;
+    r = radeon_fence_driver_init(rdev);
+    if (r)
+        return r;
+    r = radeon_irq_kms_init(rdev);
+    if (r)
+        return r;
    /* Memory manager */
    r = radeon_bo_init(rdev);
    if (r)
        return r;
    r = rs400_gart_init(rdev);
    if (r)
        return r;
    r300_set_reg_safe(rdev);
    rdev->accel_working = true;
    r = rs400_startup(rdev);
    if (r) {
        /* Something went wrong with the accel init, so stop accel */
        dev_err(rdev->dev, "Disabling GPU acceleration\n");
//        r100_cp_fini(rdev);
//        r100_wb_fini(rdev);
//        r100_ib_fini(rdev);
        rs400_gart_fini(rdev);
//        radeon_irq_kms_fini(rdev);
        rdev->accel_working = false;
    }
    return 0;
}