Subversion Repositories Kolibri OS

Rev 1129 → Rev 1179 (unified diff: "-" lines exist only in Rev 1129, "+" lines only in Rev 1179, space-prefixed lines are common to both)
 /*
  * Copyright 2008 Advanced Micro Devices, Inc.
  * Copyright 2008 Red Hat Inc.
  * Copyright 2009 Jerome Glisse.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
  * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
  */
 #include "drmP.h"
 #include "radeon_reg.h"
 #include "radeon.h"

 /* r520,rv530,rv560,rv570,r580 depends on : */
 void r100_hdp_reset(struct radeon_device *rdev);
-int rv370_pcie_gart_enable(struct radeon_device *rdev);
-void rv370_pcie_gart_disable(struct radeon_device *rdev);
 void r420_pipes_init(struct radeon_device *rdev);
 void rs600_mc_disable_clients(struct radeon_device *rdev);
 void rs600_disable_vga(struct radeon_device *rdev);
 int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
 int rv515_debugfs_ga_info_init(struct radeon_device *rdev);

 /* This files gather functions specifics to:
  * r520,rv530,rv560,rv570,r580
  *
  * Some of these functions might be used by newer ASICs.
  */
 void r520_gpu_init(struct radeon_device *rdev);
 int r520_mc_wait_for_idle(struct radeon_device *rdev);
+

 /*
  * MC
  */
 int r520_mc_init(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 	int r;

-    dbgprintf("%s\n",__FUNCTION__);
+       ENTER();

    if (r100_debugfs_rbbm_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for RBBM !\n");
    }
    if (rv515_debugfs_pipes_info_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for pipes !\n");
    }
    if (rv515_debugfs_ga_info_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for pipes !\n");
    }

 	r520_gpu_init(rdev);
 	rv370_pcie_gart_disable(rdev);

 	/* Setup GPU memory space */
 	rdev->mc.vram_location = 0xFFFFFFFFUL;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
-	if (rdev->flags & RADEON_IS_AGP) {
-		r = radeon_agp_init(rdev);
-		if (r) {
-			printk(KERN_WARNING "[drm] Disabling AGP\n");
-			rdev->flags &= ~RADEON_IS_AGP;
-			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-		} else {
-			rdev->mc.gtt_location = rdev->mc.agp_base;
-		}
-	}
 	r = radeon_mc_setup(rdev);
 	if (r) {
 		return r;
 	}

 	/* Program GPU memory space */
     rs600_mc_disable_clients(rdev);
     if (r520_mc_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait MC idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
 	/* Write VRAM size in case we are limiting it */
-	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
-	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
 	tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
 	tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
 	WREG32_MC(R520_MC_FB_LOCATION, tmp);
 	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
 	WREG32(0x310, rdev->mc.vram_location);
 	if (rdev->flags & RADEON_IS_AGP) {
 		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
 		tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
 		tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
 		WREG32_MC(R520_MC_AGP_LOCATION, tmp);
 		WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
 		WREG32_MC(R520_MC_AGP_BASE_2, 0);
 	} else {
 		WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
 		WREG32_MC(R520_MC_AGP_BASE, 0);
 		WREG32_MC(R520_MC_AGP_BASE_2, 0);
 	}

-    dbgprintf("done: %s\n",__FUNCTION__);
+    LEAVE();

 	return 0;
 }

 void r520_mc_fini(struct radeon_device *rdev)
 {
-	rv370_pcie_gart_disable(rdev);
-	radeon_gart_table_vram_free(rdev);
-	radeon_gart_fini(rdev);
 }


 /*
  * Global GPU functions
  */
 void r520_errata(struct radeon_device *rdev)
 {
 	rdev->pll_errata = 0;
 }

 int r520_mc_wait_for_idle(struct radeon_device *rdev)
 {
 	unsigned i;
 	uint32_t tmp;

 	for (i = 0; i < rdev->usec_timeout; i++) {
 		/* read MC_STATUS */
 		tmp = RREG32_MC(R520_MC_STATUS);
 		if (tmp & R520_MC_STATUS_IDLE) {
 			return 0;
 		}
 		DRM_UDELAY(1);
 	}
 	return -1;
 }

 void r520_gpu_init(struct radeon_device *rdev)
 {
 	unsigned pipe_select_current, gb_pipe_select, tmp;
-    dbgprintf("%s\n",__FUNCTION__);
+    ENTER();

 	r100_hdp_reset(rdev);
 	rs600_disable_vga(rdev);
 	/*
 	 * DST_PIPE_CONFIG		0x170C
 	 * GB_TILE_CONFIG		0x4018
 	 * GB_FIFO_SIZE			0x4024
 	 * GB_PIPE_SELECT		0x402C
 	 * GB_PIPE_SELECT2              0x4124
 	 *	Z_PIPE_SHIFT			0
 	 *	Z_PIPE_MASK			0x000000003
 	 * GB_FIFO_SIZE2                0x4128
 	 *	SC_SFIFO_SIZE_SHIFT		0
 	 *	SC_SFIFO_SIZE_MASK		0x000000003
 	 *	SC_MFIFO_SIZE_SHIFT		2
 	 *	SC_MFIFO_SIZE_MASK		0x00000000C
 	 *	FG_SFIFO_SIZE_SHIFT		4
 	 *	FG_SFIFO_SIZE_MASK		0x000000030
 	 *	ZB_MFIFO_SIZE_SHIFT		6
 	 *	ZB_MFIFO_SIZE_MASK		0x0000000C0
 	 * GA_ENHANCE			0x4274
 	 * SU_REG_DEST			0x42C8
 	 */
 	/* workaround for RV530 */
 	if (rdev->family == CHIP_RV530) {
-		WREG32(0x4124, 1);
 		WREG32(0x4128, 0xFF);
 	}
 	r420_pipes_init(rdev);
 	gb_pipe_select = RREG32(0x402C);
 	tmp = RREG32(0x170C);
 	pipe_select_current = (tmp >> 2) & 3;
 	tmp = (1 << pipe_select_current) |
 	      (((gb_pipe_select >> 8) & 0xF) << 4);
 	WREG32_PLL(0x000D, tmp);
 	if (r520_mc_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait MC idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
 }


 /*
  * VRAM info
  */
 static void r520_vram_get_type(struct radeon_device *rdev)
 {
 	uint32_t tmp;
-    dbgprintf("%s\n",__FUNCTION__);
+    ENTER();

 	rdev->mc.vram_width = 128;
 	rdev->mc.vram_is_ddr = true;
 	tmp = RREG32_MC(R520_MC_CNTL0);
 	switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
 	case 0:
 		rdev->mc.vram_width = 32;
 		break;
 	case 1:
 		rdev->mc.vram_width = 64;
 		break;
 	case 2:
 		rdev->mc.vram_width = 128;
 		break;
 	case 3:
 		rdev->mc.vram_width = 256;
 		break;
 	default:
 		rdev->mc.vram_width = 128;
 		break;
 	}
 	if (tmp & R520_MC_CHANNEL_SIZE)
 		rdev->mc.vram_width *= 2;
 }

 void r520_vram_info(struct radeon_device *rdev)
 {
-	r520_vram_get_type(rdev);
-	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
-
-	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-}
-
-
-int radeon_agp_init(struct radeon_device *rdev)
-{
-
-    dbgprintf("%s\n",__FUNCTION__);
-
-#if __OS_HAS_AGP
-    struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
-    struct drm_agp_mode mode;
-    struct drm_agp_info info;
-    uint32_t agp_status;
-    int default_mode;
-    bool is_v3;
-    int ret;
-
-    /* Acquire AGP. */
-    if (!rdev->ddev->agp->acquired) {
-        ret = drm_agp_acquire(rdev->ddev);
-        if (ret) {
-            DRM_ERROR("Unable to acquire AGP: %d\n", ret);
-            return ret;
-        }
-    }
-
-    ret = drm_agp_info(rdev->ddev, &info);
-    if (ret) {
-        DRM_ERROR("Unable to get AGP info: %d\n", ret);
-        return ret;
-    }
-    mode.mode = info.mode;
-    agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
-    is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
-
-    if (is_v3) {
-        default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
-    } else {
-        if (agp_status & RADEON_AGP_4X_MODE) {
-            default_mode = 4;
-        } else if (agp_status & RADEON_AGP_2X_MODE) {
-            default_mode = 2;
-        } else {
-            default_mode = 1;
-        }
-    }
-
-    /* Apply AGPMode Quirks */
-    while (p && p->chip_device != 0) {
-        if (info.id_vendor == p->hostbridge_vendor &&
-            info.id_device == p->hostbridge_device &&
-            rdev->pdev->vendor == p->chip_vendor &&
-            rdev->pdev->device == p->chip_device &&
-            rdev->pdev->subsystem_vendor == p->subsys_vendor &&
-            rdev->pdev->subsystem_device == p->subsys_device) {
-            default_mode = p->default_mode;
-        }
-        ++p;
-    }
-
-    if (radeon_agpmode > 0) {
-        if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
-            (radeon_agpmode > (is_v3 ? 8 : 4)) ||
-            (radeon_agpmode & (radeon_agpmode - 1))) {
-            DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
-                  radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
-                  default_mode);
-            radeon_agpmode = default_mode;
-        } else {
-            DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
-        }
-    } else {
-        radeon_agpmode = default_mode;
-    }
-
-    mode.mode &= ~RADEON_AGP_MODE_MASK;
-    if (is_v3) {
-        switch (radeon_agpmode) {
-        case 8:
-            mode.mode |= RADEON_AGPv3_8X_MODE;
-            break;
-        case 4:
-        default:
-            mode.mode |= RADEON_AGPv3_4X_MODE;
-            break;
-        }
-    } else {
-        switch (radeon_agpmode) {
-        case 4:
-            mode.mode |= RADEON_AGP_4X_MODE;
-            break;
-        case 2:
-            mode.mode |= RADEON_AGP_2X_MODE;
-            break;
-        case 1:
-        default:
-            mode.mode |= RADEON_AGP_1X_MODE;
-            break;
-        }
-    }
-
-    mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
-    ret = drm_agp_enable(rdev->ddev, mode);
-    if (ret) {
-        DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
-        return ret;
-    }
-
-    rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
-    rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
-
-    /* workaround some hw issues */
-    if (rdev->family < CHIP_R200) {
-        WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
-    }
-    return 0;
-#else
-    return 0;
-#endif
-}
-
-
-void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
-
-
-int radeon_fence_driver_init(struct radeon_device *rdev)
-{
-    unsigned long irq_flags;
-    int r;
-
-//    write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-    r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
-    if (r) {
-        DRM_ERROR("Fence failed to get a scratch register.");
-//        write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-        return r;
-    }
-    WREG32(rdev->fence_drv.scratch_reg, 0);
-//    atomic_set(&rdev->fence_drv.seq, 0);
-//    INIT_LIST_HEAD(&rdev->fence_drv.created);
-//    INIT_LIST_HEAD(&rdev->fence_drv.emited);
-//    INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-    rdev->fence_drv.count_timeout = 0;
-//    init_waitqueue_head(&rdev->fence_drv.queue);
-//    write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-//    if (radeon_debugfs_fence_init(rdev)) {
-//        DRM_ERROR("Failed to register debugfs file for fence !\n");
-//    }
-    return 0;
-}
+	fixed20_12 a;
+
+	r520_vram_get_type(rdev);
+
+	r100_vram_init_sizes(rdev);
+	/* FIXME: we should enforce default clock in case GPU is not in
+	 * default setup
+	 */
+	a.full = rfixed_const(100);
+	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
+	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+}
+
+void r520_bandwidth_update(struct radeon_device *rdev)
+{
+	rv515_bandwidth_avivo_update(rdev);
+}
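
One change runs through every function in this diff: the dbgprintf("%s\n",__FUNCTION__) and dbgprintf("done: %s\n",__FUNCTION__) trace calls of Rev 1129 are replaced by ENTER() and LEAVE() in Rev 1179. Those macros are defined elsewhere in the KolibriOS port, not in this file; the minimal sketch below only illustrates what a dbgprintf-based pair could look like, so the format strings and the dbgprintf prototype shown here are assumptions, not the port's actual definitions.

/* Hypothetical sketch of ENTER()/LEAVE() as thin wrappers around dbgprintf();
 * the real definitions in the KolibriOS headers may differ. */
void dbgprintf(const char *format, ...);   /* assumed prototype */

#define ENTER()  dbgprintf("enter %s\n", __FUNCTION__)
#define LEAVE()  dbgprintf("leave %s\n", __FUNCTION__)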