Subversion Repositories — Kolibri OS

Rev 2017 → Rev 2160
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include 

#include 
#include 
#include 
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
#include "display.h"

#include 

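/* Driver options: the Linux radeon module parameters carried over as plain
 * globals in this KolibriOS port. */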
int radeon_no_wb   =  1;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
int radeon_gart_size = 512; /* default gart size */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = -1;
int radeon_dynpm = -1;
int radeon_audio = 1;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_disp_priority = 0;

int irq_override = 0;


extern display_t *rdisplay;

void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct radeon_device *rdev, videomode_t *mode);

int get_modes(videomode_t *mode, int *count);
int set_user_mode(videomode_t *mode);
int r100_2D_test(struct radeon_device *rdev);


 /* Legacy VGA regions */
#define VGA_RSRC_NONE          0x00
#define VGA_RSRC_LEGACY_IO     0x01
#define VGA_RSRC_LEGACY_MEM    0x02
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO     0x04
#define VGA_RSRC_NORMAL_MEM    0x08


static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
    /* FIXME: check this out */
    if (rdev->family < CHIP_R600) {
        int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
           radeon_clear_surface_reg(rdev, i);
        }
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
    }
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
    int i;

    /* FIXME: check this out */
    if (rdev->family < CHIP_R300) {
        rdev->scratch.num_reg = 5;
    } else {
        rdev->scratch.num_reg = 7;
    }
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    for (i = 0; i < rdev->scratch.num_reg; i++) {
        rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    }
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

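/* Set up the writeback page: allocate, pin and map one GTT page used for
 * scratch/fence writeback, then decide whether writeback and event_write
 * fences are actually used (radeon_no_wb disables it; DCE5/NI forces it on). */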
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		/* often unreliable on AGP */
//		if (rdev->flags & RADEON_IS_AGP) {
//			rdev->wb.enabled = false;
//		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
//		}
	}
	/* always use writeback/events on NI */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by the bogus hw of Novell bug 204882 along with lots of
 * Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
		a.full = dfixed_const(100);
		rdev->pm.sclk.full = dfixed_const(sclk);
		rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
		rdev->pm.mclk.full = dfixed_const(mclk);
		rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
    rdev->dummy_page.page = AllocPage();
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
    rdev->dummy_page.addr = MapIoMem(rdev->dummy_page.page, 4096, 5);
	if (!rdev->dummy_page.addr) {
//       __free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
    KernelFree(rdev->dummy_page.addr);
	rdev->dummy_page.page = NULL;
}


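/* Register-access callbacks handed to the ATOM BIOS interpreter through
 * struct card_info: MMIO, I/O-space, memory-controller and PLL accessors. */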
/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->pll_rreg(rdev, reg);
    return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->mc_rreg(rdev, reg);
    return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = RREG32(reg*4);
    return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
    return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be power of two and greater or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

int radeon_device_init(struct radeon_device *rdev,
               struct drm_device *ddev,
               struct pci_dev *pdev,
               uint32_t flags)
{
	int r, i;
	int dma_bits;

    rdev->shutdown = false;
    rdev->ddev = ddev;
    rdev->pdev = pdev;
    rdev->flags = flags;
    rdev->family = flags & RADEON_FAMILY_MASK;
    rdev->is_atom_bios = false;
    rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
    rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
    rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device);

    /* mutex initializations are all done here so we
     * can recall functions without having locking issues */
    mutex_init(&rdev->cs_mutex);
    mutex_init(&rdev->ib_pool.mutex);
    mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
    }

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
    if (r) {
		rdev->need_dma32 = true;
        printk(KERN_WARNING "radeon: No suitable DMA available.\n");
    }

    /* Registers mapping */
    /* TODO: block userspace mapping of io register */
    rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
    rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);

    rdev->rmmio =  (void*)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
                                   PG_SW+PG_NOCACHE);

    if (rdev->rmmio == NULL) {
        return -ENOMEM;
    }
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
    DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	r = radeon_init(rdev);
	if (r)
        return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
		return r;
	}
//	if (radeon_testing) {
//		radeon_test_moves(rdev);
//    }
   if (radeon_benchmarking) {
       radeon_benchmark(rdev);
    }
	return 0;
}


/*
 * Driver load/unload
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct radeon_device *rdev;
    int r;

    ENTER();

    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
    if (rdev == NULL) {
        return -ENOMEM;
    };

    dev->dev_private = (void *)rdev;

    /* update BUS flag */
    if (drm_device_is_agp(dev)) {
        flags |= RADEON_IS_AGP;
    } else if (drm_device_is_pcie(dev)) {
        flags |= RADEON_IS_PCIE;
    } else {
        flags |= RADEON_IS_PCI;
    }

    /* radeon_device_init should report only fatal error
     * like memory allocation failure or iomapping failure,
     * or memory manager initialization failure, it must
     * properly initialize the GPU MC controller and permit
     * VRAM allocation
     */
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
    if (r) {
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
        return r;
    }
    /* Again modeset_init should fail only on fatal error
     * otherwise it should provide enough functionalities
     * for shadowfb to run
     */
    if( radeon_modeset )
    {
        r = radeon_modeset_init(rdev);
        if (r) {
            return r;
        }
    };
    return 0;
}

videomode_t usermode;


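/* Minimal stand-in for the Linux DRM PCI probe path: allocate a bare
 * drm_device, load the KMS driver and bring up the display; the original
 * pci_enable_device()/drm_fill_in_dev() steps remain commented out. */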
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    static struct drm_device *dev;
    int ret;

    ENTER();

    dev = kzalloc(sizeof(*dev), 0);
    if (!dev)
        return -ENOMEM;

 //   ret = pci_enable_device(pdev);
 //   if (ret)
 //       goto err_g1;

 //   pci_set_master(pdev);

 //   if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
 //       printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
 //       goto err_g2;
 //   }

    dev->pdev = pdev;
    dev->pci_device = pdev->device;
    dev->pci_vendor = pdev->vendor;

    INIT_LIST_HEAD(&dev->filelist);
    INIT_LIST_HEAD(&dev->ctxlist);
    INIT_LIST_HEAD(&dev->vmalist);
    INIT_LIST_HEAD(&dev->maplist);

    spin_lock_init(&dev->count_lock);
    mutex_init(&dev->struct_mutex);
    mutex_init(&dev->ctxlist_mutex);


    ret = radeon_driver_load_kms(dev, ent->driver_data );
    if (ret)
        goto err_g4;

    if( radeon_modeset )
        init_display_kms(dev->dev_private, &usermode);
    else
        init_display(dev->dev_private, &usermode);


    uint32_t route0 = PciRead32(0, 31<<3, 0x60);

    uint32_t route1 = PciRead32(0, 31<<3, 0x68);

    uint8_t elcr0 = in8(0x4D0);
    uint8_t elcr1 = in8(0x4D1);

    dbgprintf("pci route: %x %x elcr: %x %x\n", route0, route1, elcr0, elcr1);

    LEAVE();

    return 0;

err_g4:
//    drm_put_minor(&dev->primary);
//err_g3:
//    if (drm_core_check_feature(dev, DRIVER_MODESET))
//        drm_put_minor(&dev->control);
//err_g2:
//    pci_disable_device(pdev);
//err_g1:
    free(dev);

    LEAVE();

    return ret;
}

resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
    return pci_resource_start(dev->pdev, resource);
}

resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
    return pci_resource_len(dev->pdev, resource);
}


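/* Software 64-bit by 32-bit division via shift-and-subtract long division:
 * the quotient is written back through *n and the remainder is returned. */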
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
        uint64_t rem = *n;
        uint64_t b = base;
        uint64_t res, d = 1;
        uint32_t high = rem >> 32;

        /* Reduce the thing a bit first */
        res = 0;
        if (high >= base) {
                high /= base;
                res = (uint64_t) high << 32;
                rem -= (uint64_t) (high*base) << 32;
        }

        while ((int64_t)b > 0 && b < rem) {
                b = b+b;
                d = d+d;
        }

        do {
                if (rem >= b) {
                        rem -= b;
                        res += d;
                }
                b >>= 1;
                d >>= 1;
        } while (d);

        *n = res;
        return rem;
}
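
/*
 * Illustrative use: divide a 64-bit counter in place and keep the remainder.
 *
 *     uint64_t n   = total_bytes;           // hypothetical 64-bit value
 *     uint32_t rem = __div64_32(&n, 1000);  // n becomes n / 1000
 */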

static struct pci_device_id pciidlist[] = {
    radeon_PCI_IDS
};
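
/*
 * io_code values understood by the DISPLAY service handler below, plus the
 * API version it reports to callers.
 */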
#define API_VERSION     0x01000100

#define SRV_GETVERSION  0
#define SRV_ENUM_MODES  1
#define SRV_SET_MODE    2

#define SRV_CREATE_VIDEO 9
#define SRV_BLIT_VIDEO   10

int r600_video_blit(uint64_t src_offset, int  x, int y,
                    int w, int h, int pitch);
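
/*
 * DISPLAY service entry point: dispatches the io_code of an incoming
 * request to the mode enumeration, mode setting and video overlay helpers.
 * For SRV_ENUM_MODES the caller passes the number of videomode_t slots in
 * the 4-byte output buffer and an input buffer sized to hold that many
 * entries. Returns 0 on success and -1 for malformed or unknown requests.
 */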
int _stdcall display_handler(ioctl_t *io)
{
    int    retval = -1;
    u32_t *inp;
    u32_t *outp;

    inp = io->input;
    outp = io->output;

    switch(io->io_code)
    {
        case SRV_GETVERSION:
            if(io->out_size==4)
            {
                *outp  = API_VERSION;
                retval = 0;
            }
            break;

        case SRV_ENUM_MODES:
            dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
                       inp, io->inp_size, io->out_size );

            if( radeon_modeset &&
                (outp != NULL) && (io->out_size == 4) &&
                (io->inp_size == *outp * sizeof(videomode_t)) )
            {
                retval = get_modes((videomode_t*)inp, outp);
            };
            break;

        case SRV_SET_MODE:
            dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
                       inp, io->inp_size);

            if(  radeon_modeset   &&
                (inp != NULL) &&
                (io->inp_size == sizeof(videomode_t)) )
            {
                retval = set_user_mode((videomode_t*)inp);
            };
            break;

        case SRV_CREATE_VIDEO:
            retval = r600_create_video(inp[0], inp[1], outp);
            break;

        case SRV_BLIT_VIDEO:
            r600_video_blit( ((uint64_t*)inp)[0], inp[2], inp[3],
                    inp[4], inp[5], inp[6]);

            retval = 0;
            break;

    };

    return retval;
}

static char  log[256];
static pci_dev_t device;
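
/*
 * Driver entry point called by the kernel loader: parses the command line,
 * opens the log file, locates a supported Radeon device on the PCI bus,
 * brings the driver up through drm_get_dev() and finally registers the
 * DISPLAY service that exposes display_handler() to callers.
 */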
u32_t
#if defined(__GNUC__) && __GNUC__ >= 4
// makes sense only if -fwhole-program is used, as in Makefile.lto
__attribute__((externally_visible))
#endif
drvEntry(int action, char *cmdline)
{
    struct radeon_device *rdev = NULL;

    struct pci_device_id  *ent;

    int     err;
    u32_t   retval = 0;

    if(action != 1)
        return 0;

    if( GetService("DISPLAY") != 0 )
        return 0;

    if( cmdline && *cmdline )
        parse_cmdline(cmdline, &usermode, log, &radeon_modeset);

    if(!dbg_open(log))
    {
        strcpy(log, "/RD/1/DRIVERS/atikms.log");

        if(!dbg_open(log))
        {
            printf("Can't open %s\nExit\n", log);
            return 0;
        };
    }
    dbgprintf("Radeon RC11 cmdline %s\n", cmdline);

    enum_pci_devices();

    ent = find_pci_device(&device, pciidlist);

    if( unlikely(ent == NULL) )
    {
        dbgprintf("device not found\n");
        return 0;
    };

    dbgprintf("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    err = drm_get_dev(&device.pci_dev, ent);

    rdev = rdisplay->ddev->dev_private;

    err = RegService("DISPLAY", display_handler);

    if( err != 0)
        dbgprintf("Set DISPLAY handler\n");

    return err;
};
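
/*
 * Stubbed out: this port does not track DRM vblank state around mode sets,
 * so these are empty placeholders for code that still calls them.
 */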
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{};

void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{};