Subversion Repositories Kolibri OS

Diff from Rev 2160 to Rev 2175. Lines common to both revisions are shown once; in changed hunks, lines prefixed with "-" exist only in Rev 2160 and lines prefixed with "+" only in Rev 2175.
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include 

#include 
#include 
#include 
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
#include "display.h"

#include 
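
/* Driver tunables. In the Linux radeon driver these are module parameters;
 * in this port they are plain globals initialized to their defaults (note
 * that writeback is disabled by default via radeon_no_wb = 1 and KMS
 * selection is driven by radeon_modeset). */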
int radeon_no_wb   =  1;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
int radeon_gart_size = 512; /* default gart size */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = -1;
int radeon_dynpm = -1;
int radeon_audio = 1;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_disp_priority = 0;

int irq_override = 0;


extern display_t *rdisplay;

void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct radeon_device *rdev, videomode_t *mode);

int get_modes(videomode_t *mode, int *count);
int set_user_mode(videomode_t *mode);
int r100_2D_test(struct radeon_device *rdev);


/* Legacy VGA regions */
#define VGA_RSRC_NONE          0x00
#define VGA_RSRC_LEGACY_IO     0x01
#define VGA_RSRC_LEGACY_MEM    0x02
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO     0x04
#define VGA_RSRC_NORMAL_MEM    0x08
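
/* Human-readable chip names, printed at init time. The order must match the
 * chip family enumeration, since the table is indexed directly with
 * rdev->family (see the DRM_INFO call in radeon_device_init below). */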
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
    /* FIXME: check this out */
    if (rdev->family < CHIP_R600) {
        int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			radeon_clear_surface_reg(rdev, i);
        }
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
    }
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
    int i;

    /* FIXME: check this out */
    if (rdev->family < CHIP_R300) {
        rdev->scratch.num_reg = 5;
    } else {
        rdev->scratch.num_reg = 7;
    }
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    for (i = 0; i < rdev->scratch.num_reg; i++) {
        rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    }
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}
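
/* Writeback (WB): a single GTT-backed page that the GPU writes fence and
 * ring read-pointer values into, so the CPU can poll ordinary memory instead
 * of reading MMIO registers. radeon_wb_init() creates, pins and maps that
 * buffer object; radeon_wb_disable()/radeon_wb_fini() tear it down. */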
void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		/* often unreliable on AGP */
//		if (rdev->flags & RADEON_IS_AGP) {
//			rdev->wb.enabled = false;
//		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
//		}
	}
	/* always use writeback/events on NI */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * This function tries to place VRAM at the base address provided as a
 * parameter (which is so far either the PCI aperture address or, for IGP,
 * the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32-bit
 * address space, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and the AGP aperture doesn't allow us to have room
 * for all the VRAM, then we restrict the VRAM to the PCI aperture size and
 * print a warning.
 *
 * This function never fails; the worst case is limiting VRAM.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size because on some boards we need to program the mc
 * to cover the whole aperture even if the VRAM size is smaller than the
 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hardware of Novell bug 204882 and the
 * Ubuntu ones).
 *
 * Note: the IGP TOM address should be the same as the aperture address; we
 * don't explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place the GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then we adjust the GTT size.
 * Thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
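/* radeon_card_posted() decides whether the video BIOS has already POSTed the
 * card: it first looks for any enabled CRTC (the register layout depends on
 * the display engine generation), then falls back to checking whether the
 * memory controller reports a non-zero memory size. */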
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}
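
/* The dummy page is used as a safe backing target for otherwise unmapped
 * GART entries. Instead of the Linux page allocator, this port obtains and
 * maps the page with the KolibriOS kernel services AllocPage() and
 * MapIoMem(), and releases the mapping with KernelFree(). */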
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
    rdev->dummy_page.page = AllocPage();
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
    rdev->dummy_page.addr = MapIoMem(rdev->dummy_page.page, 4096, 5);
	if (!rdev->dummy_page.addr) {
//       __free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
    KernelFree(rdev->dummy_page.addr);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
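/* The AtomBIOS interpreter does not touch hardware directly; it calls back
 * through the function pointers in struct card_info. The cail_* helpers
 * below route those callbacks to the driver's MMIO, PLL, MC and I/O-port
 * register accessors (ATOM passes dword indices, hence the reg*4). */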
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->pll_rreg(rdev, reg);
    return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->mc_rreg(rdev, reg);
    return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = RREG32(reg*4);
    return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
    return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater than or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
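
/* Bring-up order in radeon_device_init(): initialize the locks, pick the
 * ASIC-specific function table, validate the global tunables, choose the
 * DMA mask (32 or 40 bits), map the MMIO BAR with MapIoMem(), then run the
 * chip-specific radeon_init(). If acceleration fails on an AGP card, the
 * device is reset and reinitialized with AGP disabled as a fallback. */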
int radeon_device_init(struct radeon_device *rdev,
               struct drm_device *ddev,
               struct pci_dev *pdev,
               uint32_t flags)
{
	int r, i;
	int dma_bits;

    rdev->shutdown = false;
    rdev->ddev = ddev;
    rdev->pdev = pdev;
    rdev->flags = flags;
    rdev->family = flags & RADEON_FAMILY_MASK;
    rdev->is_atom_bios = false;
    rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
    rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
    rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device);

    /* mutex initialization is all done here so we
     * can call these functions again without locking issues */
    mutex_init(&rdev->cs_mutex);
    mutex_init(&rdev->ib_pool.mutex);
    mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart.
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
    }

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
    if (r) {
		rdev->need_dma32 = true;
        printk(KERN_WARNING "radeon: No suitable DMA available.\n");
    }

    /* Registers mapping */
    /* TODO: block userspace mapping of io register */
    rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
    rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);

    rdev->rmmio =  (void*)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
                                   PG_SW+PG_NOCACHE);

    if (rdev->rmmio == NULL) {
        return -ENOMEM;
    }
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
    DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	r = radeon_init(rdev);
	if (r)
        return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration is not working on this AGP card; try again
		 * with a fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
//	if (radeon_testing) {
//		radeon_test_moves(rdev);
//    }
   if (radeon_benchmarking) {
       radeon_benchmark(rdev);
    }
	return 0;
}


/*
 * Driver load/unload
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct radeon_device *rdev;
    int r;

    ENTER();

    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
    if (rdev == NULL) {
        return -ENOMEM;
    };

    dev->dev_private = (void *)rdev;

    /* update BUS flag */
    if (drm_device_is_agp(dev)) {
        flags |= RADEON_IS_AGP;
    } else if (drm_device_is_pcie(dev)) {
        flags |= RADEON_IS_PCIE;
    } else {
        flags |= RADEON_IS_PCI;
    }

    /* radeon_device_init should report only fatal errors
     * (memory allocation failure, iomapping failure or memory
     * manager initialization failure); it must properly
     * initialize the GPU MC controller and permit
     * VRAM allocation
     */
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
    if (r) {
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
        return r;
    }
    /* Again, modeset_init should fail only on a fatal error;
     * otherwise it should provide enough functionality
     * for shadowfb to run
     */
    if( radeon_modeset )
    {
        r = radeon_modeset_init(rdev);
        if (r) {
            return r;
        }
    };
    return 0;
}

videomode_t usermode;
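
/* drm_get_dev() stands in for the Linux DRM PCI probe helper of the same
 * name: most of the original work (enabling the PCI device, filling in the
 * DRM minors) is stubbed out or left commented, and only the drm_device
 * fields this port actually needs are set up before radeon_driver_load_kms()
 * and the display initialization are called. */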
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    static struct drm_device *dev;
    int ret;

    ENTER();

    dev = kzalloc(sizeof(*dev), 0);
    if (!dev)
        return -ENOMEM;

 //   ret = pci_enable_device(pdev);
 //   if (ret)
 //       goto err_g1;

 //   pci_set_master(pdev);

 //   if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
 //       printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
 //       goto err_g2;
 //   }

    dev->pdev = pdev;
    dev->pci_device = pdev->device;
    dev->pci_vendor = pdev->vendor;

    INIT_LIST_HEAD(&dev->filelist);
    INIT_LIST_HEAD(&dev->ctxlist);
    INIT_LIST_HEAD(&dev->vmalist);
    INIT_LIST_HEAD(&dev->maplist);

    spin_lock_init(&dev->count_lock);
    mutex_init(&dev->struct_mutex);
    mutex_init(&dev->ctxlist_mutex);


    ret = radeon_driver_load_kms(dev, ent->driver_data );
    if (ret)
        goto err_g4;

    if( radeon_modeset )
        init_display_kms(dev->dev_private, &usermode);
    else
        init_display(dev->dev_private, &usermode);


    uint32_t route0 = PciRead32(0, 31<<3, 0x60);

    uint32_t route1 = PciRead32(0, 31<<3, 0x68);

    uint8_t elcr0 = in8(0x4D0);
    uint8_t elcr1 = in8(0x4D1);

    dbgprintf("pci route: %x %x elcr: %x %x\n", route0, route1, elcr0, elcr1);

    LEAVE();

    return 0;

err_g4:
//    drm_put_minor(&dev->primary);
//err_g3:
//    if (drm_core_check_feature(dev, DRIVER_MODESET))
//        drm_put_minor(&dev->control);
//err_g2:
//    pci_disable_device(pdev);
//err_g1:
    free(dev);

    LEAVE();

    return ret;
}

resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
    return pci_resource_start(dev->pdev, resource);
}

resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
    return pci_resource_len(dev->pdev, resource);
}
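
/* 64-bit by 32-bit division helper for the 32-bit build: classic
 * shift-and-subtract long division. It stores the quotient back through *n
 * and returns the remainder, following the usual do_div() contract. */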

uint32_t __div64_32(uint64_t *n, uint32_t base)
{
        uint64_t rem = *n;
        uint64_t b = base;
        uint64_t res, d = 1;
        uint32_t high = rem >> 32;

        /* Reduce the thing a bit first */
        res = 0;
        if (high >= base) {
                high /= base;
                res = (uint64_t) high << 32;
                rem -= (uint64_t) (high*base) << 32;
        }

        while ((int64_t)b > 0 && b < rem) {
                b = b+b;
                d = d+d;
        }

        do {
                if (rem >= b) {
                        rem -= b;
                        res += d;
                }
                b >>= 1;
                d >>= 1;
        } while (d);

        *n = res;
        return rem;
}
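
/*
 * Usage sketch (assumed example, not driver code): __div64_32() divides the
 * 64-bit value in place by a 32-bit divisor and returns the remainder, the
 * same contract the kernel's do_div() helper relies on.  For instance,
 * splitting a 64-bit nanosecond count into seconds and leftover nanoseconds:
 */
#if 0
static void example_div64(void)
{
    uint64_t ns  = 12345678901ULL;               /* some 64-bit quantity */
    uint32_t rem = __div64_32(&ns, 1000000000u);

    /* ns now holds 12 (whole seconds), rem holds 345678901 (leftover ns) */
    dbgprintf("%d sec + %d ns\n", (u32_t)ns, rem);
}
#endif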

static struct pci_device_id pciidlist[] = {
    radeon_PCI_IDS
};


#define API_VERSION     0x01000100

#define SRV_GETVERSION  0
#define SRV_ENUM_MODES  1
#define SRV_SET_MODE    2

#define SRV_CREATE_VIDEO   9
#define SRV_BLIT_VIDEO     10
#define SRV_CREATE_BITMAP  11


int r600_video_blit(uint64_t src_offset, int x, int y,
                    int w, int h, int pitch);

/*
 * Argument validation helpers for display_handler() below.  They expand to a
 * bare 'break', so they may only be used directly inside the switch on
 * io->io_code; on a size mismatch the case is abandoned and the handler
 * returns its default error value.
 */
#define check_input(size) \
    if( unlikely((inp==NULL)||(io->inp_size != (size))) )   \
        break;

#define check_output(size) \
    if( unlikely((outp==NULL)||(io->out_size != (size))) )   \
        break;

/*
 * Entry point of the DISPLAY service registered in drvEntry(): dispatches
 * user requests by io_code and returns 0 on success, -1 on any failure.
 */
int _stdcall display_handler(ioctl_t *io)
{
    int    retval = -1;
    u32_t *inp;
    u32_t *outp;

    inp  = io->input;
    outp = io->output;

    switch(io->io_code)
    {
        case SRV_GETVERSION:
            check_output(4);
            *outp  = API_VERSION;
            retval = 0;
            break;

        case SRV_ENUM_MODES:
            dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
                       inp, io->inp_size, io->out_size );
            check_output(4);
            check_input(*outp * sizeof(videomode_t));
            if( radeon_modeset )
                retval = get_modes((videomode_t*)inp, outp);
            break;

        case SRV_SET_MODE:
            dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
                       inp, io->inp_size);
            check_input(sizeof(videomode_t));
            if( radeon_modeset )
                retval = set_user_mode((videomode_t*)inp);
            break;

        case SRV_CREATE_VIDEO:
            retval = r600_create_video(inp[0], inp[1], outp);
            break;

        case SRV_BLIT_VIDEO:
            r600_video_blit( ((uint64_t*)inp)[0], inp[2], inp[3],
                             inp[4], inp[5], inp[6]);
            retval = 0;
            break;

        case SRV_CREATE_BITMAP:
            check_input(8);
            check_output(4);
            retval = create_bitmap(outp, inp[0], inp[1]);
            break;
    };

    return retval;
}
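
/*
 * Caller-side sketch (assumption, not part of the driver): the handler above
 * is normally reached through the "DISPLAY" service registered in drvEntry(),
 * but its contract is easiest to see by filling an ioctl_t by hand.  Only the
 * fields the handler actually reads (io_code, input, inp_size, output,
 * out_size) are set here; everything else about service dispatch is assumed.
 */
#if 0
static int example_query_version(void)
{
    u32_t   version = 0;
    ioctl_t io = {0};

    io.io_code  = SRV_GETVERSION;
    io.input    = NULL;
    io.inp_size = 0;
    io.output   = &version;
    io.out_size = 4;               /* handler expects exactly 4 output bytes */

    if (display_handler(&io) == 0)
        dbgprintf("DISPLAY API %x\n", version);

    return (int)version;
}
#endif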

static char  log[256];
static pci_dev_t device;

u32_t drvEntry(int action, char *cmdline)
{
    struct radeon_device *rdev = NULL;

    struct pci_device_id  *ent;

    int     err;
    u32_t   retval = 0;

    if(action != 1)
        return 0;

    /* another driver has already registered the DISPLAY service */
    if( GetService("DISPLAY") != 0 )
        return 0;

    if( cmdline && *cmdline )
        parse_cmdline(cmdline, &usermode, log, &radeon_modeset);

    if(!dbg_open(log))
    {
        strcpy(log, "/RD/1/DRIVERS/atikms.log");

        if(!dbg_open(log))
        {
            printf("Can't open %s\nExit\n", log);
            return 0;
        };
    }
    dbgprintf("Radeon RC11 cmdline %s\n", cmdline);

    enum_pci_devices();

    ent = find_pci_device(&device, pciidlist);

    if( unlikely(ent == NULL) )
    {
        dbgprintf("device not found\n");
        return 0;
    };

    dbgprintf("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    err = drm_get_dev(&device.pci_dev, ent);

    rdev = rdisplay->ddev->dev_private;

    err = RegService("DISPLAY", display_handler);

    if( err != 0)
        dbgprintf("Set DISPLAY handler\n");

    return err;
};

void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{};

void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{};