Subversion Repositories Kolibri OS

Rev

Rev 2005 | Rev 3120 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2005 Rev 2997
1
/*
1
/*
2
 * Copyright 2008 Advanced Micro Devices, Inc.
2
 * Copyright 2008 Advanced Micro Devices, Inc.
3
 * Copyright 2008 Red Hat Inc.
3
 * Copyright 2008 Red Hat Inc.
4
 * Copyright 2009 Jerome Glisse.
4
 * Copyright 2009 Jerome Glisse.
5
 *
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a
6
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * copy of this software and associated documentation files (the "Software"),
7
 * copy of this software and associated documentation files (the "Software"),
8
 * to deal in the Software without restriction, including without limitation
8
 * to deal in the Software without restriction, including without limitation
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10
 * and/or sell copies of the Software, and to permit persons to whom the
10
 * and/or sell copies of the Software, and to permit persons to whom the
11
 * Software is furnished to do so, subject to the following conditions:
11
 * Software is furnished to do so, subject to the following conditions:
12
 *
12
 *
13
 * The above copyright notice and this permission notice shall be included in
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
14
 * all copies or substantial portions of the Software.
15
 *
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
 * OTHER DEALINGS IN THE SOFTWARE.
22
 * OTHER DEALINGS IN THE SOFTWARE.
23
 *
23
 *
24
 * Authors: Dave Airlie
24
 * Authors: Dave Airlie
25
 *          Alex Deucher
25
 *          Alex Deucher
26
 *          Jerome Glisse
26
 *          Jerome Glisse
27
 */
27
 */
28
#include 
28
#include 
29
#include 
29
#include 
30
#include 
30
#include 
31
#include "radeon.h"
31
#include "radeon.h"
32
#include "radeon_asic.h"
32
#include "radeon_asic.h"
33
#include "rs400d.h"
33
#include "rs400d.h"
34
 
34
 
35
/* This files gather functions specifics to : rs400,rs480 */
35
/* This files gather functions specifics to : rs400,rs480 */
36
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
36
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
37
 
37
 
38
void rs400_gart_adjust_size(struct radeon_device *rdev)
38
void rs400_gart_adjust_size(struct radeon_device *rdev)
39
{
39
{
40
	/* Check gart size */
40
	/* Check gart size */
41
	switch (rdev->mc.gtt_size/(1024*1024)) {
41
	switch (rdev->mc.gtt_size/(1024*1024)) {
42
	case 32:
42
	case 32:
43
	case 64:
43
	case 64:
44
	case 128:
44
	case 128:
45
	case 256:
45
	case 256:
46
	case 512:
46
	case 512:
47
	case 1024:
47
	case 1024:
48
	case 2048:
48
	case 2048:
49
		break;
49
		break;
50
	default:
50
	default:
51
		DRM_ERROR("Unable to use IGP GART size %uM\n",
51
		DRM_ERROR("Unable to use IGP GART size %uM\n",
52
			  (unsigned)(rdev->mc.gtt_size >> 20));
52
			  (unsigned)(rdev->mc.gtt_size >> 20));
53
		DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
53
		DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
54
		DRM_ERROR("Forcing to 32M GART size\n");
54
		DRM_ERROR("Forcing to 32M GART size\n");
55
		rdev->mc.gtt_size = 32 * 1024 * 1024;
55
		rdev->mc.gtt_size = 32 * 1024 * 1024;
56
		return;
56
		return;
57
	}
57
	}
58
}
58
}
59
 
59
 
60
void rs400_gart_tlb_flush(struct radeon_device *rdev)
60
void rs400_gart_tlb_flush(struct radeon_device *rdev)
61
{
61
{
62
	uint32_t tmp;
62
	uint32_t tmp;
63
	unsigned int timeout = rdev->usec_timeout;
63
	unsigned int timeout = rdev->usec_timeout;
64
 
64
 
65
	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
65
	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
66
	do {
66
	do {
67
		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
67
		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
68
		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
68
		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
69
			break;
69
			break;
70
		DRM_UDELAY(1);
70
		DRM_UDELAY(1);
71
		timeout--;
71
		timeout--;
72
	} while (timeout > 0);
72
	} while (timeout > 0);
73
	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
73
	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
74
}
74
}
75
 
75
 
76
int rs400_gart_init(struct radeon_device *rdev)
76
int rs400_gart_init(struct radeon_device *rdev)
77
{
77
{
78
	int r;
78
	int r;
79
 
79
 
80
	if (rdev->gart.table.ram.ptr) {
80
	if (rdev->gart.ptr) {
81
		WARN(1, "RS400 GART already initialized\n");
81
		WARN(1, "RS400 GART already initialized\n");
82
		return 0;
82
		return 0;
83
	}
83
	}
84
	/* Check gart size */
84
	/* Check gart size */
85
	switch(rdev->mc.gtt_size / (1024 * 1024)) {
85
	switch(rdev->mc.gtt_size / (1024 * 1024)) {
86
	case 32:
86
	case 32:
87
	case 64:
87
	case 64:
88
	case 128:
88
	case 128:
89
	case 256:
89
	case 256:
90
	case 512:
90
	case 512:
91
	case 1024:
91
	case 1024:
92
	case 2048:
92
	case 2048:
93
		break;
93
		break;
94
	default:
94
	default:
95
		return -EINVAL;
95
		return -EINVAL;
96
	}
96
	}
97
	/* Initialize common gart structure */
97
	/* Initialize common gart structure */
98
	r = radeon_gart_init(rdev);
98
	r = radeon_gart_init(rdev);
99
	if (r)
99
	if (r)
100
		return r;
100
		return r;
101
	if (rs400_debugfs_pcie_gart_info_init(rdev))
101
	if (rs400_debugfs_pcie_gart_info_init(rdev))
102
		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
102
		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
103
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
103
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
104
	return radeon_gart_table_ram_alloc(rdev);
104
	return radeon_gart_table_ram_alloc(rdev);
105
}
105
}
106
 
106
 
107
int rs400_gart_enable(struct radeon_device *rdev)
107
int rs400_gart_enable(struct radeon_device *rdev)
108
{
108
{
109
	uint32_t size_reg;
109
	uint32_t size_reg;
110
	uint32_t tmp;
110
	uint32_t tmp;
111
 
111
 
112
	radeon_gart_restore(rdev);
112
	radeon_gart_restore(rdev);
113
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
113
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
114
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
114
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
115
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
115
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
116
	/* Check gart size */
116
	/* Check gart size */
117
	switch(rdev->mc.gtt_size / (1024 * 1024)) {
117
	switch(rdev->mc.gtt_size / (1024 * 1024)) {
118
	case 32:
118
	case 32:
119
		size_reg = RS480_VA_SIZE_32MB;
119
		size_reg = RS480_VA_SIZE_32MB;
120
		break;
120
		break;
121
	case 64:
121
	case 64:
122
		size_reg = RS480_VA_SIZE_64MB;
122
		size_reg = RS480_VA_SIZE_64MB;
123
		break;
123
		break;
124
	case 128:
124
	case 128:
125
		size_reg = RS480_VA_SIZE_128MB;
125
		size_reg = RS480_VA_SIZE_128MB;
126
		break;
126
		break;
127
	case 256:
127
	case 256:
128
		size_reg = RS480_VA_SIZE_256MB;
128
		size_reg = RS480_VA_SIZE_256MB;
129
		break;
129
		break;
130
	case 512:
130
	case 512:
131
		size_reg = RS480_VA_SIZE_512MB;
131
		size_reg = RS480_VA_SIZE_512MB;
132
		break;
132
		break;
133
	case 1024:
133
	case 1024:
134
		size_reg = RS480_VA_SIZE_1GB;
134
		size_reg = RS480_VA_SIZE_1GB;
135
		break;
135
		break;
136
	case 2048:
136
	case 2048:
137
		size_reg = RS480_VA_SIZE_2GB;
137
		size_reg = RS480_VA_SIZE_2GB;
138
		break;
138
		break;
139
	default:
139
	default:
140
		return -EINVAL;
140
		return -EINVAL;
141
	}
141
	}
142
	/* It should be fine to program it to max value */
142
	/* It should be fine to program it to max value */
143
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
143
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
144
		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
144
		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
145
		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
145
		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
146
	} else {
146
	} else {
147
		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
147
		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
148
		WREG32(RS480_AGP_BASE_2, 0);
148
		WREG32(RS480_AGP_BASE_2, 0);
149
	}
149
	}
150
	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
150
	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
151
	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
151
	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
152
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
152
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
153
		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
153
		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
154
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
154
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
155
		WREG32(RADEON_BUS_CNTL, tmp);
155
		WREG32(RADEON_BUS_CNTL, tmp);
156
	} else {
156
	} else {
157
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
157
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
158
		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
158
		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
159
		WREG32(RADEON_BUS_CNTL, tmp);
159
		WREG32(RADEON_BUS_CNTL, tmp);
160
	}
160
	}
161
	/* Table should be in 32bits address space so ignore bits above. */
161
	/* Table should be in 32bits address space so ignore bits above. */
162
	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
162
	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
163
	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
163
	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
164
 
164
 
165
	WREG32_MC(RS480_GART_BASE, tmp);
165
	WREG32_MC(RS480_GART_BASE, tmp);
166
	/* TODO: more tweaking here */
166
	/* TODO: more tweaking here */
167
	WREG32_MC(RS480_GART_FEATURE_ID,
167
	WREG32_MC(RS480_GART_FEATURE_ID,
168
		  (RS480_TLB_ENABLE |
168
		  (RS480_TLB_ENABLE |
169
		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
169
		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
170
	/* Disable snooping */
170
	/* Disable snooping */
171
	WREG32_MC(RS480_AGP_MODE_CNTL,
171
	WREG32_MC(RS480_AGP_MODE_CNTL,
172
		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
172
		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
173
	/* Disable AGP mode */
173
	/* Disable AGP mode */
174
	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
174
	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
175
	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
175
	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
176
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
176
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
177
		WREG32_MC(RS480_MC_MISC_CNTL,
177
		WREG32_MC(RS480_MC_MISC_CNTL,
178
			  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
178
			  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
179
	} else {
179
	} else {
180
		WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
180
		WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
181
	}
181
	}
182
	/* Enable gart */
182
	/* Enable gart */
183
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
183
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
184
	rs400_gart_tlb_flush(rdev);
184
	rs400_gart_tlb_flush(rdev);
-
 
185
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-
 
186
		 (unsigned)(rdev->mc.gtt_size >> 20),
-
 
187
		 (unsigned long long)rdev->gart.table_addr);
185
	rdev->gart.ready = true;
188
	rdev->gart.ready = true;
186
	return 0;
189
	return 0;
187
}
190
}
188
 
191
 
189
void rs400_gart_disable(struct radeon_device *rdev)
192
void rs400_gart_disable(struct radeon_device *rdev)
190
{
193
{
191
	uint32_t tmp;
194
	uint32_t tmp;
192
 
195
 
193
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
196
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
194
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
197
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
195
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
198
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
196
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
199
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
197
}
200
}
198
 
201
 
199
/*
 * Tear down the GART: release common state, disable translation in
 * hardware, then free the table's system RAM (order matters — the
 * hardware must stop referencing the table before it is freed).
 */
void rs400_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs400_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}
205
 
208
 
206
#define RS400_PTE_WRITEABLE (1 << 2)
209
#define RS400_PTE_WRITEABLE (1 << 2)
207
#define RS400_PTE_READABLE  (1 << 3)
210
#define RS400_PTE_READABLE  (1 << 3)
208
 
211
 
209
/*
 * Write one GART page-table entry mapping page index @i to DMA address
 * @addr, with read and write permission. Returns 0 on success, -EINVAL
 * for an out-of-range index.
 *
 * Fix: the original bounds check used `i > num_gpu_pages`, which admits
 * i == num_gpu_pages — one entry past the end of the table (the table
 * holds exactly num_gpu_pages 32-bit PTEs). Valid indices are
 * 0 .. num_gpu_pages - 1.
 */
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	uint32_t entry;
	u32 *gtt = rdev->gart.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages)
		return -EINVAL;

	/* Low 32 bits in the page-aligned field, address bits 39:32 packed
	 * into bits 11:4 of the PTE. */
	entry = (lower_32_bits(addr) & PAGE_MASK) |
		((upper_32_bits(addr) & 0xff) << 4) |
		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
	gtt[i] = cpu_to_le32(entry);
	return 0;
}
224
 
228
 
225
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
229
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
226
{
230
{
227
	unsigned i;
231
	unsigned i;
228
	uint32_t tmp;
232
	uint32_t tmp;
229
 
233
 
230
	for (i = 0; i < rdev->usec_timeout; i++) {
234
	for (i = 0; i < rdev->usec_timeout; i++) {
231
		/* read MC_STATUS */
235
		/* read MC_STATUS */
232
		tmp = RREG32(RADEON_MC_STATUS);
236
		tmp = RREG32(RADEON_MC_STATUS);
233
		if (tmp & RADEON_MC_IDLE) {
237
		if (tmp & RADEON_MC_IDLE) {
234
			return 0;
238
			return 0;
235
		}
239
		}
236
		DRM_UDELAY(1);
240
		DRM_UDELAY(1);
237
	}
241
	}
238
	return -1;
242
	return -1;
239
}
243
}
240
 
244
 
241
void rs400_gpu_init(struct radeon_device *rdev)
245
static void rs400_gpu_init(struct radeon_device *rdev)
242
{
246
{
243
	/* FIXME: is this correct ? */
247
	/* FIXME: is this correct ? */
244
	r420_pipes_init(rdev);
248
	r420_pipes_init(rdev);
245
	if (rs400_mc_wait_for_idle(rdev)) {
249
	if (rs400_mc_wait_for_idle(rdev)) {
246
		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
250
		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
247
		       "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
251
		       "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
248
	}
252
	}
249
}
253
}
250
 
254
 
251
void rs400_mc_init(struct radeon_device *rdev)
255
static void rs400_mc_init(struct radeon_device *rdev)
252
{
256
{
253
	u64 base;
257
	u64 base;
254
 
258
 
255
	rs400_gart_adjust_size(rdev);
259
	rs400_gart_adjust_size(rdev);
256
	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
260
	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
257
	/* DDR for all card after R300 & IGP */
261
	/* DDR for all card after R300 & IGP */
258
	rdev->mc.vram_is_ddr = true;
262
	rdev->mc.vram_is_ddr = true;
259
	rdev->mc.vram_width = 128;
263
	rdev->mc.vram_width = 128;
260
	r100_vram_init_sizes(rdev);
264
	r100_vram_init_sizes(rdev);
261
	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
265
	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
262
	radeon_vram_location(rdev, &rdev->mc, base);
266
	radeon_vram_location(rdev, &rdev->mc, base);
263
	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
267
	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
264
	radeon_gtt_location(rdev, &rdev->mc);
268
	radeon_gtt_location(rdev, &rdev->mc);
265
	radeon_update_bandwidth_info(rdev);
269
	radeon_update_bandwidth_info(rdev);
266
}
270
}
267
 
271
 
268
/* Read an indirect MC register through the NB index/data register pair. */
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t val;

	WREG32(RS480_NB_MC_INDEX, reg & 0xff);
	val = RREG32(RS480_NB_MC_DATA);
	/* Park the index register after the access. */
	WREG32(RS480_NB_MC_INDEX, 0xff);
	return val;
}
277
 
281
 
278
/* Write an indirect MC register through the NB index/data register pair. */
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	/* Select the register and arm the write-enable bit. */
	WREG32(RS480_NB_MC_INDEX, (reg & 0xff) | RS480_NB_MC_IND_WR_EN);
	WREG32(RS480_NB_MC_DATA, v);
	/* Park the index register after the access. */
	WREG32(RS480_NB_MC_INDEX, 0xff);
}
284
 
288
 
285
#if defined(CONFIG_DEBUG_FS)
289
#if defined(CONFIG_DEBUG_FS)
286
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
290
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
287
{
291
{
288
	struct drm_info_node *node = (struct drm_info_node *) m->private;
292
	struct drm_info_node *node = (struct drm_info_node *) m->private;
289
	struct drm_device *dev = node->minor->dev;
293
	struct drm_device *dev = node->minor->dev;
290
	struct radeon_device *rdev = dev->dev_private;
294
	struct radeon_device *rdev = dev->dev_private;
291
	uint32_t tmp;
295
	uint32_t tmp;
292
 
296
 
293
	tmp = RREG32(RADEON_HOST_PATH_CNTL);
297
	tmp = RREG32(RADEON_HOST_PATH_CNTL);
294
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
298
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
295
	tmp = RREG32(RADEON_BUS_CNTL);
299
	tmp = RREG32(RADEON_BUS_CNTL);
296
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
300
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
297
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
301
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
298
	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
302
	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
299
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
303
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
300
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
304
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
301
		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
305
		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
302
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
306
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
303
		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
307
		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
304
		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
308
		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
305
		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
309
		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
306
		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
310
		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
307
		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
311
		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
308
		tmp = RREG32(RS690_HDP_FB_LOCATION);
312
		tmp = RREG32(RS690_HDP_FB_LOCATION);
309
		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
313
		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
310
	} else {
314
	} else {
311
		tmp = RREG32(RADEON_AGP_BASE);
315
		tmp = RREG32(RADEON_AGP_BASE);
312
		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
316
		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
313
		tmp = RREG32(RS480_AGP_BASE_2);
317
		tmp = RREG32(RS480_AGP_BASE_2);
314
		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
318
		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
315
		tmp = RREG32(RADEON_MC_AGP_LOCATION);
319
		tmp = RREG32(RADEON_MC_AGP_LOCATION);
316
		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
320
		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
317
	}
321
	}
318
	tmp = RREG32_MC(RS480_GART_BASE);
322
	tmp = RREG32_MC(RS480_GART_BASE);
319
	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
323
	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
320
	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
324
	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
321
	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
325
	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
322
	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
326
	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
323
	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
327
	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
324
	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
328
	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
325
	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
329
	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
326
	tmp = RREG32_MC(0x5F);
330
	tmp = RREG32_MC(0x5F);
327
	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
331
	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
328
	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
332
	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
329
	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
333
	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
330
	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
334
	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
331
	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
335
	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
332
	tmp = RREG32_MC(0x3B);
336
	tmp = RREG32_MC(0x3B);
333
	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
337
	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
334
	tmp = RREG32_MC(0x3C);
338
	tmp = RREG32_MC(0x3C);
335
	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
339
	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
336
	tmp = RREG32_MC(0x30);
340
	tmp = RREG32_MC(0x30);
337
	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
341
	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
338
	tmp = RREG32_MC(0x31);
342
	tmp = RREG32_MC(0x31);
339
	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
343
	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
340
	tmp = RREG32_MC(0x32);
344
	tmp = RREG32_MC(0x32);
341
	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
345
	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
342
	tmp = RREG32_MC(0x33);
346
	tmp = RREG32_MC(0x33);
343
	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
347
	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
344
	tmp = RREG32_MC(0x34);
348
	tmp = RREG32_MC(0x34);
345
	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
349
	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
346
	tmp = RREG32_MC(0x35);
350
	tmp = RREG32_MC(0x35);
347
	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
351
	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
348
	tmp = RREG32_MC(0x36);
352
	tmp = RREG32_MC(0x36);
349
	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
353
	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
350
	tmp = RREG32_MC(0x37);
354
	tmp = RREG32_MC(0x37);
351
	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
355
	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
352
	return 0;
356
	return 0;
353
}
357
}
354
 
358
 
355
static struct drm_info_list rs400_gart_info_list[] = {
359
static struct drm_info_list rs400_gart_info_list[] = {
356
	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
360
	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
357
};
361
};
358
#endif
362
#endif
359
 
363
 
360
/* Register the GART debugfs entry; compiles to a no-op without CONFIG_DEBUG_FS. */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
	return 0;
#endif
}
368
 
372
 
369
void rs400_mc_program(struct radeon_device *rdev)
373
static void rs400_mc_program(struct radeon_device *rdev)
370
{
374
{
371
	struct r100_mc_save save;
375
	struct r100_mc_save save;
372
 
376
 
373
	/* Stops all mc clients */
377
	/* Stops all mc clients */
374
	r100_mc_stop(rdev, &save);
378
	r100_mc_stop(rdev, &save);
375
 
379
 
376
	/* Wait for mc idle */
380
	/* Wait for mc idle */
377
	if (rs400_mc_wait_for_idle(rdev))
381
	if (rs400_mc_wait_for_idle(rdev))
378
		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
382
		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
379
	WREG32(R_000148_MC_FB_LOCATION,
383
	WREG32(R_000148_MC_FB_LOCATION,
380
		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
384
		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
381
		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
385
		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
382
 
386
 
383
	r100_mc_resume(rdev, &save);
387
	r100_mc_resume(rdev, &save);
384
}
388
}
385
 
389
 
386
static int rs400_startup(struct radeon_device *rdev)
390
static int rs400_startup(struct radeon_device *rdev)
387
{
391
{
388
	int r;
392
	int r;
389
 
393
 
390
	r100_set_common_regs(rdev);
394
	r100_set_common_regs(rdev);
391
 
395
 
392
	rs400_mc_program(rdev);
396
	rs400_mc_program(rdev);
393
	/* Resume clock */
397
	/* Resume clock */
394
	r300_clock_startup(rdev);
398
	r300_clock_startup(rdev);
395
	/* Initialize GPU configuration (# pipes, ...) */
399
	/* Initialize GPU configuration (# pipes, ...) */
396
	rs400_gpu_init(rdev);
400
	rs400_gpu_init(rdev);
397
	r100_enable_bm(rdev);
401
	r100_enable_bm(rdev);
398
	/* Initialize GART (initialize after TTM so we can allocate
402
	/* Initialize GART (initialize after TTM so we can allocate
399
	 * memory through TTM but finalize after TTM) */
403
	 * memory through TTM but finalize after TTM) */
400
	r = rs400_gart_enable(rdev);
404
	r = rs400_gart_enable(rdev);
401
	if (r)
405
	if (r)
402
		return r;
406
		return r;
403
 
407
 
404
	/* allocate wb buffer */
408
	/* allocate wb buffer */
405
	r = radeon_wb_init(rdev);
409
	r = radeon_wb_init(rdev);
406
	if (r)
410
	if (r)
407
		return r;
411
		return r;
408
 
412
 
409
	/* Enable IRQ */
413
	/* Enable IRQ */
410
	r100_irq_set(rdev);
414
	r100_irq_set(rdev);
411
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
415
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
412
	/* 1M ring buffer */
416
	/* 1M ring buffer */
413
   r = r100_cp_init(rdev, 1024 * 1024);
417
   r = r100_cp_init(rdev, 1024 * 1024);
414
   if (r) {
418
   if (r) {
415
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
419
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
416
       return r;
420
       return r;
417
   }
421
   }
-
 
422
 
418
	r = r100_ib_init(rdev);
423
	r = radeon_ib_pool_init(rdev);
419
	if (r) {
424
	if (r) {
420
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
425
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
421
		return r;
426
		return r;
422
	}
427
	}
-
 
428
 
423
	return 0;
429
	return 0;
424
}
430
}
425
 
431
 
426
 
432
 
427
 
433
 
428
 
434
 
429
int rs400_init(struct radeon_device *rdev)
435
int rs400_init(struct radeon_device *rdev)
430
{
436
{
431
	int r;
437
	int r;
432
 
438
 
433
	/* Disable VGA */
439
	/* Disable VGA */
434
	r100_vga_render_disable(rdev);
440
	r100_vga_render_disable(rdev);
435
	/* Initialize scratch registers */
441
	/* Initialize scratch registers */
436
	radeon_scratch_init(rdev);
442
	radeon_scratch_init(rdev);
437
	/* Initialize surface registers */
443
	/* Initialize surface registers */
438
	radeon_surface_init(rdev);
444
	radeon_surface_init(rdev);
439
	/* TODO: disable VGA need to use VGA request */
445
	/* TODO: disable VGA need to use VGA request */
440
	/* restore some register to sane defaults */
446
	/* restore some register to sane defaults */
441
	r100_restore_sanity(rdev);
447
	r100_restore_sanity(rdev);
442
	/* BIOS*/
448
	/* BIOS*/
443
	if (!radeon_get_bios(rdev)) {
449
	if (!radeon_get_bios(rdev)) {
444
		if (ASIC_IS_AVIVO(rdev))
450
		if (ASIC_IS_AVIVO(rdev))
445
			return -EINVAL;
451
			return -EINVAL;
446
	}
452
	}
447
	if (rdev->is_atom_bios) {
453
	if (rdev->is_atom_bios) {
448
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
454
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
449
		return -EINVAL;
455
		return -EINVAL;
450
	} else {
456
	} else {
451
		r = radeon_combios_init(rdev);
457
		r = radeon_combios_init(rdev);
452
		if (r)
458
		if (r)
453
			return r;
459
			return r;
454
	}
460
	}
455
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
461
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
456
	if (radeon_asic_reset(rdev)) {
462
	if (radeon_asic_reset(rdev)) {
457
		dev_warn(rdev->dev,
463
		dev_warn(rdev->dev,
458
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
464
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
459
			RREG32(R_000E40_RBBM_STATUS),
465
			RREG32(R_000E40_RBBM_STATUS),
460
			RREG32(R_0007C0_CP_STAT));
466
			RREG32(R_0007C0_CP_STAT));
461
	}
467
	}
462
	/* check if cards are posted or not */
468
	/* check if cards are posted or not */
463
	if (radeon_boot_test_post_card(rdev) == false)
469
	if (radeon_boot_test_post_card(rdev) == false)
464
		return -EINVAL;
470
		return -EINVAL;
465
 
471
 
466
	/* Initialize clocks */
472
	/* Initialize clocks */
467
	radeon_get_clock_info(rdev->ddev);
473
	radeon_get_clock_info(rdev->ddev);
468
	/* initialize memory controller */
474
	/* initialize memory controller */
469
	rs400_mc_init(rdev);
475
	rs400_mc_init(rdev);
470
	/* Fence driver */
476
	/* Fence driver */
471
	r = radeon_fence_driver_init(rdev);
477
	r = radeon_fence_driver_init(rdev);
472
	if (r)
478
	if (r)
473
		return r;
479
		return r;
474
	r = radeon_irq_kms_init(rdev);
480
	r = radeon_irq_kms_init(rdev);
475
	if (r)
481
	if (r)
476
		return r;
482
		return r;
477
	/* Memory manager */
483
	/* Memory manager */
478
	r = radeon_bo_init(rdev);
484
	r = radeon_bo_init(rdev);
479
	if (r)
485
	if (r)
480
		return r;
486
		return r;
481
	r = rs400_gart_init(rdev);
487
	r = rs400_gart_init(rdev);
482
	if (r)
488
	if (r)
483
		return r;
489
		return r;
484
	r300_set_reg_safe(rdev);
490
	r300_set_reg_safe(rdev);
-
 
491
 
485
	rdev->accel_working = true;
492
	rdev->accel_working = true;
486
	r = rs400_startup(rdev);
493
	r = rs400_startup(rdev);
487
	if (r) {
494
	if (r) {
488
		/* Somethings want wront with the accel init stop accel */
495
		/* Somethings want wront with the accel init stop accel */
489
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
496
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
490
//		r100_cp_fini(rdev);
497
//		r100_cp_fini(rdev);
491
//		r100_wb_fini(rdev);
498
//		r100_wb_fini(rdev);
492
//		r100_ib_fini(rdev);
499
//		r100_ib_fini(rdev);
493
		rs400_gart_fini(rdev);
500
		rs400_gart_fini(rdev);
494
//		radeon_irq_kms_fini(rdev);
501
//		radeon_irq_kms_fini(rdev);
495
		rdev->accel_working = false;
502
		rdev->accel_working = false;
496
	}
503
	}
497
	return 0;
504
	return 0;
498
}
505
}