Subversion Repositories Kolibri OS


Rev 5078 → Rev 5097
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include 
#include 
#include 
#include 
#include 
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

#include "bitmap.h"
#include "display.h"


#include 

#define PCI_VENDOR_ID_ATI               0x1002
#define PCI_VENDOR_ID_APPLE             0x106b

int radeon_no_wb;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
int radeon_gart_size = -1; /* auto */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_audio = -1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
int radeon_fastfb = 0;
int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
int radeon_vm_size = 8;
int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
int irq_override = 0;
int radeon_bapm = -1;


extern display_t *os_display;
extern struct drm_device *main_device;
extern videomode_t usermode;


void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct drm_device *dev, videomode_t *usermode);

int get_modes(videomode_t *mode, u32_t *count);
int set_user_mode(videomode_t *mode);
int r100_2D_test(struct radeon_device *rdev);


/* Legacy VGA regions */
#define VGA_RSRC_NONE          0x00
#define VGA_RSRC_LEGACY_IO     0x01
#define VGA_RSRC_LEGACY_MEM    0x02
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO     0x04
#define VGA_RSRC_NORMAL_MEM    0x08


static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};

#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)

struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* macbook pro 8.2 */
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
	{ 0, 0, 0, 0, 0 },
};

bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}

static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;
}

/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}

/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
    /* FIXME: check this out */
    if (rdev->family < CHIP_R600) {
        int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
        }
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
    }
}

/*
 * GPU scratch register helper functions.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (r1xx-r5xx)
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
    int i;

    /* FIXME: check this out */
    if (rdev->family < CHIP_R300) {
        rdev->scratch.num_reg = 5;
    } else {
        rdev->scratch.num_reg = 7;
    }
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    for (i = 0; i < rdev->scratch.num_reg; i++) {
        rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    }
}

/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

/**
 * radeon_doorbell_get - Allocate a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Allocate a doorbell for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics)
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}

/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics).  Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * This function will try to place VRAM at the base address provided
 * as a parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size because on some boards we need to program the MC
 * to cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu
 * ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * Params are used to calculate display watermarks (all asics).
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;

	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->pll_rreg(rdev, reg);
    return r;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->pll_wreg(rdev, reg, val);
}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->mc_rreg(rdev, reg);
    return r;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->mc_wreg(rdev, reg, val);
}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    WREG32(reg*4, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = RREG32(reg*4);
    return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
929
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
929
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
930
{
930
{
931
	struct radeon_device *rdev = info->dev->dev_private;
931
	struct radeon_device *rdev = info->dev->dev_private;
932
 
932
 
933
	WREG32_IO(reg*4, val);
933
	WREG32_IO(reg*4, val);
934
}
934
}
935
 
935
 
936
/**
936
/**
937
 * cail_ioreg_read - read IO register
937
 * cail_ioreg_read - read IO register
938
 *
938
 *
939
 * @info: atom card_info pointer
939
 * @info: atom card_info pointer
940
 * @reg: IO register offset
940
 * @reg: IO register offset
941
 *
941
 *
942
 * Provides an IO register accessor for the atom interpreter (r4xx+).
942
 * Provides an IO register accessor for the atom interpreter (r4xx+).
943
 * Returns the value of the IO register.
943
 * Returns the value of the IO register.
944
 */
944
 */
945
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
945
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
946
{
946
{
947
	struct radeon_device *rdev = info->dev->dev_private;
947
	struct radeon_device *rdev = info->dev->dev_private;
948
	uint32_t r;
948
	uint32_t r;
949
 
949
 
950
	r = RREG32_IO(reg*4);
950
	r = RREG32_IO(reg*4);
951
	return r;
951
	return r;
952
}
952
}
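
/*
 * Editor's note on the reg*4 scaling in the cail_reg_* and cail_ioreg_*
 * accessors above (illustration only, not driver code): the ATOM interpreter
 * addresses registers as 32-bit dword indices, while the RREG32/WREG32 and
 * RREG32_IO/WREG32_IO macros take byte offsets, hence the multiplication by 4.
 */
static inline uint32_t example_atom_index_to_byte_offset(uint32_t dword_index)
{
	return dword_index * sizeof(uint32_t);	/* e.g. index 0x10 -> byte offset 0x40 */
}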
953
 
953
 
954
/**
954
/**
955
 * radeon_atombios_init - init the driver info and callbacks for atombios
955
 * radeon_atombios_init - init the driver info and callbacks for atombios
956
 *
956
 *
957
 * @rdev: radeon_device pointer
957
 * @rdev: radeon_device pointer
958
 *
958
 *
959
 * Initializes the driver info and register access callbacks for the
959
 * Initializes the driver info and register access callbacks for the
960
 * ATOM interpreter (r4xx+).
960
 * ATOM interpreter (r4xx+).
961
 * Returns 0 on success, -ENOMEM on failure.
961
 * Returns 0 on success, -ENOMEM on failure.
962
 * Called at driver startup.
962
 * Called at driver startup.
963
 */
963
 */
964
int radeon_atombios_init(struct radeon_device *rdev)
964
int radeon_atombios_init(struct radeon_device *rdev)
965
{
965
{
966
	struct card_info *atom_card_info =
966
	struct card_info *atom_card_info =
967
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
967
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
968
 
968
 
969
	if (!atom_card_info)
969
	if (!atom_card_info)
970
		return -ENOMEM;
970
		return -ENOMEM;
971
 
971
 
972
	rdev->mode_info.atom_card_info = atom_card_info;
972
	rdev->mode_info.atom_card_info = atom_card_info;
973
	atom_card_info->dev = rdev->ddev;
973
	atom_card_info->dev = rdev->ddev;
974
	atom_card_info->reg_read = cail_reg_read;
974
	atom_card_info->reg_read = cail_reg_read;
975
	atom_card_info->reg_write = cail_reg_write;
975
	atom_card_info->reg_write = cail_reg_write;
976
	/* needed for iio ops */
976
	/* needed for iio ops */
977
	if (rdev->rio_mem) {
977
	if (rdev->rio_mem) {
978
		atom_card_info->ioreg_read = cail_ioreg_read;
978
		atom_card_info->ioreg_read = cail_ioreg_read;
979
		atom_card_info->ioreg_write = cail_ioreg_write;
979
		atom_card_info->ioreg_write = cail_ioreg_write;
980
	} else {
980
	} else {
981
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
981
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
982
		atom_card_info->ioreg_read = cail_reg_read;
982
		atom_card_info->ioreg_read = cail_reg_read;
983
		atom_card_info->ioreg_write = cail_reg_write;
983
		atom_card_info->ioreg_write = cail_reg_write;
984
	}
984
	}
985
	atom_card_info->mc_read = cail_mc_read;
985
	atom_card_info->mc_read = cail_mc_read;
986
	atom_card_info->mc_write = cail_mc_write;
986
	atom_card_info->mc_write = cail_mc_write;
987
	atom_card_info->pll_read = cail_pll_read;
987
	atom_card_info->pll_read = cail_pll_read;
988
	atom_card_info->pll_write = cail_pll_write;
988
	atom_card_info->pll_write = cail_pll_write;
989
 
989
 
990
	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
990
	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
991
	if (!rdev->mode_info.atom_context) {
991
	if (!rdev->mode_info.atom_context) {
992
		radeon_atombios_fini(rdev);
992
		radeon_atombios_fini(rdev);
993
		return -ENOMEM;
993
		return -ENOMEM;
994
	}
994
	}
995
 
995
 
996
	mutex_init(&rdev->mode_info.atom_context->mutex);
996
	mutex_init(&rdev->mode_info.atom_context->mutex);
997
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
997
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
998
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
998
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
999
    return 0;
999
    return 0;
1000
}
1000
}
1001
 
1001
 
1002
/**
1002
/**
1003
 * radeon_atombios_fini - free the driver info and callbacks for atombios
1003
 * radeon_atombios_fini - free the driver info and callbacks for atombios
1004
 *
1004
 *
1005
 * @rdev: radeon_device pointer
1005
 * @rdev: radeon_device pointer
1006
 *
1006
 *
1007
 * Frees the driver info and register access callbacks for the ATOM
1007
 * Frees the driver info and register access callbacks for the ATOM
1008
 * interpreter (r4xx+).
1008
 * interpreter (r4xx+).
1009
 * Called at driver shutdown.
1009
 * Called at driver shutdown.
1010
 */
1010
 */
1011
void radeon_atombios_fini(struct radeon_device *rdev)
1011
void radeon_atombios_fini(struct radeon_device *rdev)
1012
{
1012
{
1013
	if (rdev->mode_info.atom_context) {
1013
	if (rdev->mode_info.atom_context) {
1014
		kfree(rdev->mode_info.atom_context->scratch);
1014
		kfree(rdev->mode_info.atom_context->scratch);
1015
	}
1015
	}
1016
	kfree(rdev->mode_info.atom_context);
1016
	kfree(rdev->mode_info.atom_context);
1017
	rdev->mode_info.atom_context = NULL;
1017
	rdev->mode_info.atom_context = NULL;
1018
	kfree(rdev->mode_info.atom_card_info);
1018
	kfree(rdev->mode_info.atom_card_info);
1019
	rdev->mode_info.atom_card_info = NULL;
1019
	rdev->mode_info.atom_card_info = NULL;
1020
}
1020
}
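
/*
 * Minimal usage sketch for the two functions above (editor's illustration,
 * not driver code; the helper name is hypothetical). radeon_atombios_init()
 * either succeeds or returns -ENOMEM with nothing left to clean up, and
 * radeon_atombios_fini() is safe to call again because it NULLs the pointers
 * it frees.
 */
static int example_atom_setup_teardown(struct radeon_device *rdev)
{
	int r = radeon_atombios_init(rdev);
	if (r)
		return r;			/* -ENOMEM, nothing allocated */

	/* ... use rdev->mode_info.atom_context here ... */

	radeon_atombios_fini(rdev);		/* frees atom_context and card_info */
	return 0;
}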
1021
 
1021
 
1022
/* COMBIOS */
1022
/* COMBIOS */
1023
/*
1023
/*
1024
 * COMBIOS is the bios format prior to ATOM. It provides
1024
 * COMBIOS is the bios format prior to ATOM. It provides
1025
 * command tables similar to ATOM, but doesn't have a unified
1025
 * command tables similar to ATOM, but doesn't have a unified
1026
 * parser.  See radeon_combios.c
1026
 * parser.  See radeon_combios.c
1027
 */
1027
 */
1028
 
1028
 
1029
/**
1029
/**
1030
 * radeon_combios_init - init the driver info for combios
1030
 * radeon_combios_init - init the driver info for combios
1031
 *
1031
 *
1032
 * @rdev: radeon_device pointer
1032
 * @rdev: radeon_device pointer
1033
 *
1033
 *
1034
 * Initializes the driver info for combios (r1xx-r3xx).
1034
 * Initializes the driver info for combios (r1xx-r3xx).
1035
 * Returns 0 on success.
1035
 * Returns 0 on success.
1036
 * Called at driver startup.
1036
 * Called at driver startup.
1037
 */
1037
 */
1038
int radeon_combios_init(struct radeon_device *rdev)
1038
int radeon_combios_init(struct radeon_device *rdev)
1039
{
1039
{
1040
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1040
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1041
	return 0;
1041
	return 0;
1042
}
1042
}
1043
 
1043
 
1044
/**
1044
/**
1045
 * radeon_combios_fini - free the driver info for combios
1045
 * radeon_combios_fini - free the driver info for combios
1046
 *
1046
 *
1047
 * @rdev: radeon_device pointer
1047
 * @rdev: radeon_device pointer
1048
 *
1048
 *
1049
 * Frees the driver info for combios (r1xx-r3xx).
1049
 * Frees the driver info for combios (r1xx-r3xx).
1050
 * Called at driver shutdown.
1050
 * Called at driver shutdown.
1051
 */
1051
 */
1052
void radeon_combios_fini(struct radeon_device *rdev)
1052
void radeon_combios_fini(struct radeon_device *rdev)
1053
{
1053
{
1054
}
1054
}
1055
 
1055
 
1056
/* if we get transitioned to only one device, take VGA back */
1056
/* if we get transitioned to only one device, take VGA back */
1057
/**
1057
/**
1058
 * radeon_vga_set_decode - enable/disable vga decode
1058
 * radeon_vga_set_decode - enable/disable vga decode
1059
 *
1059
 *
1060
 * @cookie: radeon_device pointer
1060
 * @cookie: radeon_device pointer
1061
 * @state: enable/disable vga decode
1061
 * @state: enable/disable vga decode
1062
 *
1062
 *
1063
 * Enable/disable vga decode (all asics).
1063
 * Enable/disable vga decode (all asics).
1064
 * Returns VGA resource flags.
1064
 * Returns VGA resource flags.
1065
 */
1065
 */
1066
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1066
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1067
{
1067
{
1068
	struct radeon_device *rdev = cookie;
1068
	struct radeon_device *rdev = cookie;
1069
	radeon_vga_set_state(rdev, state);
1069
	radeon_vga_set_state(rdev, state);
1070
	if (state)
1070
	if (state)
1071
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1071
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1072
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1072
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1073
	else
1073
	else
1074
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1074
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1075
}
1075
}
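
/*
 * Editor's note: radeon_vga_set_decode() is a callback for the VGA arbiter.
 * In the upstream Linux driver it is registered roughly as sketched below
 * (an assumption about the upstream API; the registration itself is not part
 * of this listing). The return value tells the arbiter which legacy VGA
 * resources the device still decodes after the state change.
 */
#if 0	/* illustration only */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
#endif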
1076
 
1076
 
1077
/**
1077
/**
1078
 * radeon_check_pot_argument - check that argument is a power of two
1078
 * radeon_check_pot_argument - check that argument is a power of two
1079
 *
1079
 *
1080
 * @arg: value to check
1080
 * @arg: value to check
1081
 *
1081
 *
1082
 * Validates that a certain argument is a power of two (all asics).
1082
 * Validates that a certain argument is a power of two (all asics).
1083
 * Returns true if argument is valid.
1083
 * Returns true if argument is valid.
1084
 */
1084
 */
1085
static bool radeon_check_pot_argument(int arg)
1085
static bool radeon_check_pot_argument(int arg)
1086
{
1086
{
1087
	return (arg & (arg - 1)) == 0;
1087
	return (arg & (arg - 1)) == 0;
1088
}
1088
}
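
/*
 * Worked example for the check above (editor's illustration, helper name is
 * hypothetical): arg & (arg - 1) clears the lowest set bit of arg, so the
 * result is zero exactly when arg has at most one bit set.
 */
static void example_pot_checks(void)
{
	bool a = radeon_check_pot_argument(512); /* 0b1000000000 & 0b0111111111 == 0 -> true  */
	bool b = radeon_check_pot_argument(576); /* 0b1001000000 & 0b1000111111 != 0 -> false */
	bool c = radeon_check_pot_argument(0);   /* 0 & -1 == 0 -> true; 0 is used as "unset" */
	(void)a; (void)b; (void)c;
}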
1089
 
1089
 
1090
/**
1090
/**
1091
 * radeon_check_arguments - validate module params
1091
 * radeon_check_arguments - validate module params
1092
 *
1092
 *
1093
 * @rdev: radeon_device pointer
1093
 * @rdev: radeon_device pointer
1094
 *
1094
 *
1095
 * Validates certain module parameters and updates
1095
 * Validates certain module parameters and updates
1096
 * the associated values used by the driver (all asics).
1096
 * the associated values used by the driver (all asics).
1097
 */
1097
 */
1098
static void radeon_check_arguments(struct radeon_device *rdev)
1098
static void radeon_check_arguments(struct radeon_device *rdev)
1099
{
1099
{
1100
	/* vramlimit must be a power of two */
1100
	/* vramlimit must be a power of two */
1101
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
1101
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
1102
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1102
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1103
				radeon_vram_limit);
1103
				radeon_vram_limit);
1104
		radeon_vram_limit = 0;
1104
		radeon_vram_limit = 0;
1105
	}
1105
	}
1106
 
1106
 
1107
	if (radeon_gart_size == -1) {
1107
	if (radeon_gart_size == -1) {
1108
		/* default to a larger gart size on newer asics */
1108
		/* default to a larger gart size on newer asics */
1109
		if (rdev->family >= CHIP_RV770)
1109
		if (rdev->family >= CHIP_RV770)
1110
			radeon_gart_size = 1024;
1110
			radeon_gart_size = 1024;
1111
		else
1111
		else
1112
			radeon_gart_size = 512;
1112
			radeon_gart_size = 512;
1113
	}
1113
	}
1114
	/* gtt size must be a power of two and greater than or equal to 32M */
1114
	/* gtt size must be a power of two and greater than or equal to 32M */
1115
	if (radeon_gart_size < 32) {
1115
	if (radeon_gart_size < 32) {
1116
		dev_warn(rdev->dev, "gart size (%d) too small\n",
1116
		dev_warn(rdev->dev, "gart size (%d) too small\n",
1117
				radeon_gart_size);
1117
				radeon_gart_size);
1118
		if (rdev->family >= CHIP_RV770)
1118
		if (rdev->family >= CHIP_RV770)
1119
			radeon_gart_size = 1024;
1119
			radeon_gart_size = 1024;
1120
		else
1120
		else
1121
			radeon_gart_size = 512;
1121
			radeon_gart_size = 512;
1122
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
1122
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
1123
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1123
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1124
				radeon_gart_size);
1124
				radeon_gart_size);
1125
		if (rdev->family >= CHIP_RV770)
1125
		if (rdev->family >= CHIP_RV770)
1126
			radeon_gart_size = 1024;
1126
			radeon_gart_size = 1024;
1127
		else
1127
		else
1128
			radeon_gart_size = 512;
1128
			radeon_gart_size = 512;
1129
	}
1129
	}
1130
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1130
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1131
 
1131
 
1132
	/* AGP mode can only be -1, 1, 2, 4, 8 */
1132
	/* AGP mode can only be -1, 1, 2, 4, 8 */
1133
	switch (radeon_agpmode) {
1133
	switch (radeon_agpmode) {
1134
	case -1:
1134
	case -1:
1135
	case 0:
1135
	case 0:
1136
	case 1:
1136
	case 1:
1137
	case 2:
1137
	case 2:
1138
	case 4:
1138
	case 4:
1139
	case 8:
1139
	case 8:
1140
		break;
1140
		break;
1141
	default:
1141
	default:
1142
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1142
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1143
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1143
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1144
		radeon_agpmode = 0;
1144
		radeon_agpmode = 0;
1145
		break;
1145
		break;
1146
	}
1146
	}
1147
 
1147
 
1148
	if (!radeon_check_pot_argument(radeon_vm_size)) {
1148
	if (!radeon_check_pot_argument(radeon_vm_size)) {
1149
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1149
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1150
			 radeon_vm_size);
1150
			 radeon_vm_size);
1151
		radeon_vm_size = 4;
1151
		radeon_vm_size = 4;
1152
	}
1152
	}
1153
 
1153
 
1154
	if (radeon_vm_size < 1) {
1154
	if (radeon_vm_size < 1) {
1155
		dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
1155
		dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
1156
			 radeon_vm_size);
1156
			 radeon_vm_size);
1157
		radeon_vm_size = 4;
1157
		radeon_vm_size = 4;
1158
	}
1158
	}
1159
 
1159
 
1160
       /*
1160
       /*
1161
        * Max GPUVM size for Cayman, SI and CI is 40 bits.
1161
        * Max GPUVM size for Cayman, SI and CI is 40 bits.
1162
        */
1162
        */
1163
	if (radeon_vm_size > 1024) {
1163
	if (radeon_vm_size > 1024) {
1164
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1164
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1165
			 radeon_vm_size);
1165
			 radeon_vm_size);
1166
		radeon_vm_size = 4;
1166
		radeon_vm_size = 4;
1167
	}
1167
	}
1168
 
1168
 
1169
	/* defines number of bits in page table versus page directory,
1169
	/* defines number of bits in page table versus page directory,
1170
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1170
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1171
	 * page table and the remaining bits are in the page directory */
1171
	 * page table and the remaining bits are in the page directory */
1172
	if (radeon_vm_block_size == -1) {
1172
	if (radeon_vm_block_size == -1) {
1173
 
1173
 
1174
		/* Total bits covered by PD + PTs */
1174
		/* Total bits covered by PD + PTs */
1175
		unsigned bits = ilog2(radeon_vm_size) + 17;
1175
		unsigned bits = ilog2(radeon_vm_size) + 17;
1176
 
1176
 
1177
		/* Make sure the PD is 4K in size up to 8GB address space.
1177
		/* Make sure the PD is 4K in size up to 8GB address space.
1178
		   Above that, split equally between PD and PTs */
1178
		   Above that, split equally between PD and PTs */
1179
		if (radeon_vm_size <= 8)
1179
		if (radeon_vm_size <= 8)
1180
			radeon_vm_block_size = bits - 9;
1180
			radeon_vm_block_size = bits - 9;
1181
		else
1181
		else
1182
			radeon_vm_block_size = (bits + 3) / 2;
1182
			radeon_vm_block_size = (bits + 3) / 2;
1183
 
1183
 
1184
	} else if (radeon_vm_block_size < 9) {
1184
	} else if (radeon_vm_block_size < 9) {
1185
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1185
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1186
			 radeon_vm_block_size);
1186
			 radeon_vm_block_size);
1187
		radeon_vm_block_size = 9;
1187
		radeon_vm_block_size = 9;
1188
	}
1188
	}
1189
 
1189
 
1190
	if (radeon_vm_block_size > 24 ||
1190
	if (radeon_vm_block_size > 24 ||
1191
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1191
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1192
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1192
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1193
			 radeon_vm_block_size);
1193
			 radeon_vm_block_size);
1194
		radeon_vm_block_size = 9;
1194
		radeon_vm_block_size = 9;
1195
	}
1195
	}
1196
}
1196
}
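
/*
 * Worked example of the sanitizing above (editor's illustration, values are
 * hypothetical): with radeon_gart_size = 1024 (MB) the GTT becomes
 * 1024 << 20 bytes = 1 GiB; with radeon_vm_size = 8 (GiB) the formula above
 * gives bits = ilog2(8) + 17 = 20 and, since 8 <= 8, radeon_vm_block_size =
 * 20 - 9 = 11, which also passes the final range check; an out-of-range
 * radeon_agpmode such as 3 is warned about and reset to 0.
 */
#if 0	/* illustration only */
	radeon_gart_size = 1024;	/* -> rdev->mc.gtt_size = 1024ull << 20  */
	radeon_vm_size = 8;		/* -> radeon_vm_block_size = 3 + 17 - 9  */
	radeon_agpmode = 3;		/* -> reset to 0 by the switch above     */
#endif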
1197
 
1197
 
1198
/**
1198
/**
1199
 * radeon_device_init - initialize the driver
1199
 * radeon_device_init - initialize the driver
1200
 *
1200
 *
1201
 * @rdev: radeon_device pointer
1201
 * @rdev: radeon_device pointer
1202
 * @ddev: drm dev pointer
1202
 * @ddev: drm dev pointer
1203
 * @pdev: pci dev pointer
1203
 * @pdev: pci dev pointer
1204
 * @flags: driver flags
1204
 * @flags: driver flags
1205
 *
1205
 *
1206
 * Initializes the driver info and hw (all asics).
1206
 * Initializes the driver info and hw (all asics).
1207
 * Returns 0 for success or an error on failure.
1207
 * Returns 0 for success or an error on failure.
1208
 * Called at driver startup.
1208
 * Called at driver startup.
1209
 */
1209
 */
1210
int radeon_device_init(struct radeon_device *rdev,
1210
int radeon_device_init(struct radeon_device *rdev,
1211
               struct drm_device *ddev,
1211
               struct drm_device *ddev,
1212
               struct pci_dev *pdev,
1212
               struct pci_dev *pdev,
1213
               uint32_t flags)
1213
               uint32_t flags)
1214
{
1214
{
1215
	int r, i;
1215
	int r, i;
1216
	int dma_bits;
1216
	int dma_bits;
1217
	bool runtime = false;
1217
	bool runtime = false;
1218
 
1218
 
1219
    rdev->shutdown = false;
1219
    rdev->shutdown = false;
1220
	rdev->dev = &pdev->dev;
1220
	rdev->dev = &pdev->dev;
1221
    rdev->ddev = ddev;
1221
    rdev->ddev = ddev;
1222
    rdev->pdev = pdev;
1222
    rdev->pdev = pdev;
1223
    rdev->flags = flags;
1223
    rdev->flags = flags;
1224
    rdev->family = flags & RADEON_FAMILY_MASK;
1224
    rdev->family = flags & RADEON_FAMILY_MASK;
1225
    rdev->is_atom_bios = false;
1225
    rdev->is_atom_bios = false;
1226
    rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1226
    rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1227
	rdev->mc.gtt_size = 512 * 1024 * 1024;
1227
	rdev->mc.gtt_size = 512 * 1024 * 1024;
1228
	rdev->accel_working = false;
1228
	rdev->accel_working = false;
1229
	/* set up ring ids */
1229
	/* set up ring ids */
1230
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1230
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1231
		rdev->ring[i].idx = i;
1231
		rdev->ring[i].idx = i;
1232
	}
1232
	}
1233
 
1233
 
1234
	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1234
	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1235
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1235
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1236
		pdev->subsystem_vendor, pdev->subsystem_device);
1236
		pdev->subsystem_vendor, pdev->subsystem_device);
1237
 
1237
 
1238
    /* mutex initializations are all done here so we
1238
    /* mutex initializations are all done here so we
1239
     * can recall functions without having locking issues */
1239
     * can recall functions without having locking issues */
1240
	mutex_init(&rdev->ring_lock);
1240
	mutex_init(&rdev->ring_lock);
1241
	mutex_init(&rdev->dc_hw_i2c_mutex);
1241
	mutex_init(&rdev->dc_hw_i2c_mutex);
1242
	atomic_set(&rdev->ih.lock, 0);
1242
	atomic_set(&rdev->ih.lock, 0);
1243
	mutex_init(&rdev->gem.mutex);
1243
	mutex_init(&rdev->gem.mutex);
1244
	mutex_init(&rdev->pm.mutex);
1244
	mutex_init(&rdev->pm.mutex);
1245
	mutex_init(&rdev->gpu_clock_mutex);
1245
	mutex_init(&rdev->gpu_clock_mutex);
1246
	mutex_init(&rdev->srbm_mutex);
1246
	mutex_init(&rdev->srbm_mutex);
1247
//   init_rwsem(&rdev->pm.mclk_lock);
1247
//   init_rwsem(&rdev->pm.mclk_lock);
1248
//   init_rwsem(&rdev->exclusive_lock);
1248
//   init_rwsem(&rdev->exclusive_lock);
1249
	init_waitqueue_head(&rdev->irq.vblank_queue);
1249
	init_waitqueue_head(&rdev->irq.vblank_queue);
1250
	r = radeon_gem_init(rdev);
1250
	r = radeon_gem_init(rdev);
1251
	if (r)
1251
	if (r)
1252
		return r;
1252
		return r;
1253
 
1253
 
1254
	radeon_check_arguments(rdev);
1254
	radeon_check_arguments(rdev);
1255
	/* Adjust VM size here.
1255
	/* Adjust VM size here.
1256
	 * Max GPUVM size for cayman+ is 40 bits.
1256
	 * Max GPUVM size for cayman+ is 40 bits.
1257
	 */
1257
	 */
1258
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1258
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1259
 
1259
 
1260
	/* Set asic functions */
1260
	/* Set asic functions */
1261
	r = radeon_asic_init(rdev);
1261
	r = radeon_asic_init(rdev);
1262
	if (r)
1262
	if (r)
1263
		return r;
1263
		return r;
1264
 
1264
 
1265
	/* all of the newer IGP chips have an internal gart.
1265
	/* all of the newer IGP chips have an internal gart.
1266
	 * However, some rs4xx report as AGP, so remove that here.
1266
	 * However, some rs4xx report as AGP, so remove that here.
1267
	 */
1267
	 */
1268
	if ((rdev->family >= CHIP_RS400) &&
1268
	if ((rdev->family >= CHIP_RS400) &&
1269
	    (rdev->flags & RADEON_IS_IGP)) {
1269
	    (rdev->flags & RADEON_IS_IGP)) {
1270
		rdev->flags &= ~RADEON_IS_AGP;
1270
		rdev->flags &= ~RADEON_IS_AGP;
1271
	}
1271
	}
1272
 
1272
 
1273
	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1273
	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1274
		radeon_agp_disable(rdev);
1274
		radeon_agp_disable(rdev);
1275
    }
1275
    }
1276
 
1276
 
1277
	/* Set the internal MC address mask
1277
	/* Set the internal MC address mask
1278
	 * This is the max address of the GPU's
1278
	 * This is the max address of the GPU's
1279
	 * internal address space.
1279
	 * internal address space.
1280
	 */
1280
	 */
1281
	if (rdev->family >= CHIP_CAYMAN)
1281
	if (rdev->family >= CHIP_CAYMAN)
1282
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1282
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1283
	else if (rdev->family >= CHIP_CEDAR)
1283
	else if (rdev->family >= CHIP_CEDAR)
1284
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1284
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1285
	else
1285
	else
1286
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1286
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1287
 
1287
 
1288
	/* set DMA mask + need_dma32 flags.
1288
	/* set DMA mask + need_dma32 flags.
1289
	 * PCIE - can handle 40-bits.
1289
	 * PCIE - can handle 40-bits.
1290
	 * IGP - can handle 40-bits
1290
	 * IGP - can handle 40-bits
1291
	 * AGP - generally dma32 is safest
1291
	 * AGP - generally dma32 is safest
1292
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1292
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1293
	 */
1293
	 */
1294
	rdev->need_dma32 = false;
1294
	rdev->need_dma32 = false;
1295
	if (rdev->flags & RADEON_IS_AGP)
1295
	if (rdev->flags & RADEON_IS_AGP)
1296
		rdev->need_dma32 = true;
1296
		rdev->need_dma32 = true;
1297
	if ((rdev->flags & RADEON_IS_PCI) &&
1297
	if ((rdev->flags & RADEON_IS_PCI) &&
1298
	    (rdev->family <= CHIP_RS740))
1298
	    (rdev->family <= CHIP_RS740))
1299
		rdev->need_dma32 = true;
1299
		rdev->need_dma32 = true;
1300
 
1300
 
1301
	dma_bits = rdev->need_dma32 ? 32 : 40;
1301
	dma_bits = rdev->need_dma32 ? 32 : 40;
1302
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1302
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1303
    if (r) {
1303
    if (r) {
1304
		rdev->need_dma32 = true;
1304
		rdev->need_dma32 = true;
1305
		dma_bits = 32;
1305
		dma_bits = 32;
1306
        printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1306
        printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1307
    }
1307
    }
1308
 
1308
 
1309
    /* Registers mapping */
1309
    /* Registers mapping */
1310
    /* TODO: block userspace mapping of io register */
1310
    /* TODO: block userspace mapping of io register */
1311
	spin_lock_init(&rdev->mmio_idx_lock);
1311
	spin_lock_init(&rdev->mmio_idx_lock);
1312
	spin_lock_init(&rdev->smc_idx_lock);
1312
	spin_lock_init(&rdev->smc_idx_lock);
1313
	spin_lock_init(&rdev->pll_idx_lock);
1313
	spin_lock_init(&rdev->pll_idx_lock);
1314
	spin_lock_init(&rdev->mc_idx_lock);
1314
	spin_lock_init(&rdev->mc_idx_lock);
1315
	spin_lock_init(&rdev->pcie_idx_lock);
1315
	spin_lock_init(&rdev->pcie_idx_lock);
1316
	spin_lock_init(&rdev->pciep_idx_lock);
1316
	spin_lock_init(&rdev->pciep_idx_lock);
1317
	spin_lock_init(&rdev->pif_idx_lock);
1317
	spin_lock_init(&rdev->pif_idx_lock);
1318
	spin_lock_init(&rdev->cg_idx_lock);
1318
	spin_lock_init(&rdev->cg_idx_lock);
1319
	spin_lock_init(&rdev->uvd_idx_lock);
1319
	spin_lock_init(&rdev->uvd_idx_lock);
1320
	spin_lock_init(&rdev->rcu_idx_lock);
1320
	spin_lock_init(&rdev->rcu_idx_lock);
1321
	spin_lock_init(&rdev->didt_idx_lock);
1321
	spin_lock_init(&rdev->didt_idx_lock);
1322
	spin_lock_init(&rdev->end_idx_lock);
1322
	spin_lock_init(&rdev->end_idx_lock);
1323
	if (rdev->family >= CHIP_BONAIRE) {
1323
	if (rdev->family >= CHIP_BONAIRE) {
1324
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1324
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1325
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1325
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1326
	} else {
1326
	} else {
1327
    rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1327
    rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1328
    rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1328
    rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1329
	}
1329
	}
1330
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1330
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1331
    if (rdev->rmmio == NULL) {
1331
    if (rdev->rmmio == NULL) {
1332
        return -ENOMEM;
1332
        return -ENOMEM;
1333
    }
1333
    }
1334
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1334
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1335
    DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1335
    DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1336
 
1336
 
1337
	/* doorbell bar mapping */
1337
	/* doorbell bar mapping */
1338
	if (rdev->family >= CHIP_BONAIRE)
1338
	if (rdev->family >= CHIP_BONAIRE)
1339
		radeon_doorbell_init(rdev);
1339
		radeon_doorbell_init(rdev);
1340
 
1340
 
1341
	/* io port mapping */
1341
	/* io port mapping */
1342
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1342
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1343
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1343
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1344
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1344
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1345
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1345
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1346
			break;
1346
			break;
1347
		}
1347
		}
1348
	}
1348
	}
1349
	if (rdev->rio_mem == NULL)
1349
	if (rdev->rio_mem == NULL)
1350
		DRM_ERROR("Unable to find PCI I/O BAR\n");
1350
		DRM_ERROR("Unable to find PCI I/O BAR\n");
1351
 
1351
 
1352
	if (rdev->flags & RADEON_IS_PX)
1352
	if (rdev->flags & RADEON_IS_PX)
1353
		radeon_device_handle_px_quirks(rdev);
1353
		radeon_device_handle_px_quirks(rdev);
1354
	if (rdev->flags & RADEON_IS_PX)
1354
	if (rdev->flags & RADEON_IS_PX)
1355
		runtime = true;
1355
		runtime = true;
1356
 
1356
 
1357
	r = radeon_init(rdev);
1357
	r = radeon_init(rdev);
1358
	if (r)
1358
	if (r)
1359
        return r;
1359
        return r;
1360
 
1360
 
1361
	r = radeon_ib_ring_tests(rdev);
1361
	r = radeon_ib_ring_tests(rdev);
1362
	if (r)
1362
	if (r)
1363
		DRM_ERROR("ib ring test failed (%d).\n", r);
1363
		DRM_ERROR("ib ring test failed (%d).\n", r);
1364
 
1364
 
1365
 
1365
 
1366
	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1366
	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1367
		/* Acceleration not working on AGP card try again
1367
		/* Acceleration not working on AGP card try again
1368
		 * with fallback to PCI or PCIE GART
1368
		 * with fallback to PCI or PCIE GART
1369
		 */
1369
		 */
1370
		radeon_asic_reset(rdev);
1370
		radeon_asic_reset(rdev);
1371
		radeon_fini(rdev);
1371
		radeon_fini(rdev);
1372
		radeon_agp_disable(rdev);
1372
		radeon_agp_disable(rdev);
1373
		r = radeon_init(rdev);
1373
		r = radeon_init(rdev);
1374
		if (r)
1374
		if (r)
1375
			return r;
1375
			return r;
1376
	}
1376
	}
1377
 
1377
 
1378
	if ((radeon_testing & 1)) {
1378
	if ((radeon_testing & 1)) {
1379
		if (rdev->accel_working)
1379
		if (rdev->accel_working)
1380
			radeon_test_moves(rdev);
1380
			radeon_test_moves(rdev);
1381
		else
1381
		else
1382
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1382
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1383
	}
1383
	}
1384
	if ((radeon_testing & 2)) {
1384
	if ((radeon_testing & 2)) {
1385
		if (rdev->accel_working)
1385
		if (rdev->accel_working)
1386
			radeon_test_syncing(rdev);
1386
			radeon_test_syncing(rdev);
1387
		else
1387
		else
1388
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1388
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1389
	}
1389
	}
1390
	if (radeon_benchmarking) {
1390
	if (radeon_benchmarking) {
1391
		if (rdev->accel_working)
1391
		if (rdev->accel_working)
1392
			radeon_benchmark(rdev, radeon_benchmarking);
1392
			radeon_benchmark(rdev, radeon_benchmarking);
1393
		else
1393
		else
1394
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1394
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1395
	}
1395
	}
1396
	return 0;
1396
	return 0;
1397
}
1397
}
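
/*
 * Editor's note on the address-mask arithmetic in radeon_device_init() above
 * (illustration only): mc_mask bounds the GPU's internal address space and
 * dma_bits bounds the DMA mask handed to the PCI layer.
 *
 *   0xffffffffffULL = 2^40 - 1 -> 1 TiB  internal space (CAYMAN and newer)
 *   0xfffffffffULL  = 2^36 - 1 -> 64 GiB internal space (CEDAR up to, not including, CAYMAN)
 *   0xffffffffULL   = 2^32 - 1 -> 4 GiB  internal space (older parts)
 *
 *   dma_bits = 40 -> DMA_BIT_MASK(40) == 0xffffffffffULL
 *   dma_bits = 32 -> DMA_BIT_MASK(32) == 0xffffffffULL (AGP, and PCI parts up to RS740)
 */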
1398
 
1398
 
1399
/**
1399
/**
1400
 * radeon_gpu_reset - reset the asic
1400
 * radeon_gpu_reset - reset the asic
1401
 *
1401
 *
1402
 * @rdev: radeon device pointer
1402
 * @rdev: radeon device pointer
1403
 *
1403
 *
1404
 * Attempt to reset the GPU if it has hung (all asics).
1404
 * Attempt to reset the GPU if it has hung (all asics).
1405
 * Returns 0 for success or an error on failure.
1405
 * Returns 0 for success or an error on failure.
1406
 */
1406
 */
1407
int radeon_gpu_reset(struct radeon_device *rdev)
1407
int radeon_gpu_reset(struct radeon_device *rdev)
1408
{
1408
{
1409
    unsigned ring_sizes[RADEON_NUM_RINGS];
1409
    unsigned ring_sizes[RADEON_NUM_RINGS];
1410
    uint32_t *ring_data[RADEON_NUM_RINGS];
1410
    uint32_t *ring_data[RADEON_NUM_RINGS];
1411
 
1411
 
1412
    bool saved = false;
1412
    bool saved = false;
1413
 
1413
 
1414
    int i, r;
1414
    int i, r;
1415
    int resched;
1415
    int resched;
1416
 
1416
 
1417
//    down_write(&rdev->exclusive_lock);
1417
//    down_write(&rdev->exclusive_lock);
1418
	rdev->needs_reset = false;
1418
	rdev->needs_reset = false;
1419
 
1419
 
1420
    radeon_save_bios_scratch_regs(rdev);
1420
    radeon_save_bios_scratch_regs(rdev);
1421
    /* block TTM */
1421
    /* block TTM */
1422
//    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1422
//    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1423
    radeon_suspend(rdev);
1423
    radeon_suspend(rdev);
1424
 
1424
 
1425
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1425
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1426
        ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1426
        ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1427
                           &ring_data[i]);
1427
                           &ring_data[i]);
1428
        if (ring_sizes[i]) {
1428
        if (ring_sizes[i]) {
1429
            saved = true;
1429
            saved = true;
1430
            dev_info(rdev->dev, "Saved %d dwords of commands "
1430
            dev_info(rdev->dev, "Saved %d dwords of commands "
1431
                 "on ring %d.\n", ring_sizes[i], i);
1431
                 "on ring %d.\n", ring_sizes[i], i);
1432
        }
1432
        }
1433
    }
1433
    }
1434
 
1434
 
1435
retry:
1435
retry:
1436
    r = radeon_asic_reset(rdev);
1436
    r = radeon_asic_reset(rdev);
1437
    if (!r) {
1437
    if (!r) {
1438
        dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1438
        dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1439
        radeon_resume(rdev);
1439
        radeon_resume(rdev);
1440
    }
1440
    }
1441
 
1441
 
1442
    radeon_restore_bios_scratch_regs(rdev);
1442
    radeon_restore_bios_scratch_regs(rdev);
1443
 
1443
 
1444
    if (!r) {
1444
    if (!r) {
1445
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1445
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1446
            radeon_ring_restore(rdev, &rdev->ring[i],
1446
            radeon_ring_restore(rdev, &rdev->ring[i],
1447
                        ring_sizes[i], ring_data[i]);
1447
                        ring_sizes[i], ring_data[i]);
1448
            ring_sizes[i] = 0;
1448
            ring_sizes[i] = 0;
1449
            ring_data[i] = NULL;
1449
            ring_data[i] = NULL;
1450
        }
1450
        }
1451
 
1451
 
1452
//        r = radeon_ib_ring_tests(rdev);
1452
//        r = radeon_ib_ring_tests(rdev);
1453
//        if (r) {
1453
//        if (r) {
1454
//            dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1454
//            dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1455
//            if (saved) {
1455
//            if (saved) {
1456
//                saved = false;
1456
//                saved = false;
1457
//                radeon_suspend(rdev);
1457
//                radeon_suspend(rdev);
1458
//                goto retry;
1458
//                goto retry;
1459
//            }
1459
//            }
1460
//        }
1460
//        }
1461
    } else {
1461
    } else {
1462
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1462
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1463
            kfree(ring_data[i]);
1463
            kfree(ring_data[i]);
1464
        }
1464
        }
1465
    }
1465
    }
1466
 
1466
 
1467
//    ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1467
//    ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1468
    if (r) {
1468
    if (r) {
1469
        /* bad news, how do we tell userspace? */
1469
        /* bad news, how do we tell userspace? */
1470
        dev_info(rdev->dev, "GPU reset failed\n");
1470
        dev_info(rdev->dev, "GPU reset failed\n");
1471
    }
1471
    }
1472
 
1472
 
1473
//    up_write(&rdev->exclusive_lock);
1473
//    up_write(&rdev->exclusive_lock);
1474
    return r;
1474
    return r;
1475
}
1475
}
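
/*
 * Editor's sketch of the save/restore contract used by radeon_gpu_reset()
 * above (illustration only): radeon_ring_backup() allocates the backup
 * buffer and returns the number of saved dwords (0 means nothing to save).
 * After a successful reset the data is replayed with radeon_ring_restore(),
 * which takes ownership of the buffer (the caller above only NULLs its
 * pointer); after a failed reset the caller frees it with kfree().
 */
#if 0	/* illustration only */
	unsigned size = radeon_ring_backup(rdev, ring, &data);
	if (reset_ok)
		radeon_ring_restore(rdev, ring, size, data);
	else
		kfree(data);
#endif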
1476
 
1476
 
1477
 
1477
 
1478
/*
1478
/*
1479
 * Driver load/unload
1479
 * Driver load/unload
1480
 */
1480
 */
1481
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
1481
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
1482
{
1482
{
1483
    struct radeon_device *rdev;
1483
    struct radeon_device *rdev;
1484
    int r;
1484
    int r;
1485
 
1485
 
1486
 
1486
 
1487
    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
1487
    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
1488
    if (rdev == NULL) {
1488
    if (rdev == NULL) {
1489
        return -ENOMEM;
1489
        return -ENOMEM;
1490
    }
1490
    }
1491
 
1491
 
1492
    dev->dev_private = (void *)rdev;
1492
    dev->dev_private = (void *)rdev;
1493
 
1493
 
1494
    /* update BUS flag */
1494
    /* update BUS flag */
1495
    if (drm_device_is_agp(dev)) {
1495
    if (drm_pci_device_is_agp(dev)) {
1496
        flags |= RADEON_IS_AGP;
1496
        flags |= RADEON_IS_AGP;
1497
    } else if (drm_device_is_pcie(dev)) {
1497
    } else if (drm_device_is_pcie(dev)) {
1498
        flags |= RADEON_IS_PCIE;
1498
        flags |= RADEON_IS_PCIE;
1499
    } else {
1499
    } else {
1500
        flags |= RADEON_IS_PCI;
1500
        flags |= RADEON_IS_PCI;
1501
    }
1501
    }
1502
 
1502
 
1503
    /* radeon_device_init should report only fatal errors
1503
    /* radeon_device_init should report only fatal errors
1504
     * like memory allocation failure or iomapping failure,
1504
     * like memory allocation failure or iomapping failure,
1505
     * or memory manager initialization failure; it must
1505
     * or memory manager initialization failure; it must
1506
     * properly initialize the GPU MC controller and permit
1506
     * properly initialize the GPU MC controller and permit
1507
     * VRAM allocation
1507
     * VRAM allocation
1508
     */
1508
     */
1509
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
1509
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
1510
    if (r) {
1510
    if (r) {
1511
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
1511
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
1512
        return r;
1512
        return r;
1513
    }
1513
    }
1514
    /* Again, modeset_init should fail only on fatal errors;
1514
    /* Again, modeset_init should fail only on fatal errors;
1515
     * otherwise it should provide enough functionality
1515
     * otherwise it should provide enough functionality
1516
     * for shadowfb to run
1516
     * for shadowfb to run
1517
     */
1517
     */
1518
    main_device = dev;
1518
    main_device = dev;
1519
 
1519
 
1520
    if( radeon_modeset )
1520
    if( radeon_modeset )
1521
    {
1521
    {
1522
        r = radeon_modeset_init(rdev);
1522
        r = radeon_modeset_init(rdev);
1523
        if (r) {
1523
        if (r) {
1524
            return r;
1524
            return r;
1525
        }
1525
        }
1526
        init_display_kms(dev, &usermode);
1526
        init_display_kms(dev, &usermode);
1527
    }
1527
    }
1528
    else
1528
    else
1529
        init_display(rdev, &usermode);
1529
        init_display(rdev, &usermode);
1530
 
1530
 
1531
    return 0;
1531
    return 0;
1532
}
1532
}
1533
 
1533
 
1534
 
1534
 
1535
 
1535
 
1536
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
1536
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
1537
{
1537
{
1538
    return pci_resource_start(dev->pdev, resource);
1538
    return pci_resource_start(dev->pdev, resource);
1539
}
1539
}
1540
 
1540
 
1541
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
1541
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
1542
{
1542
{
1543
    return pci_resource_len(dev->pdev, resource);
1543
    return pci_resource_len(dev->pdev, resource);
1544
}
1544
}
1545
 
1545
 
1546
 
1546
 
1547
uint32_t __div64_32(uint64_t *n, uint32_t base)
1547
uint32_t __div64_32(uint64_t *n, uint32_t base)
1548
{
1548
{
1549
        uint64_t rem = *n;
1549
        uint64_t rem = *n;
1550
        uint64_t b = base;
1550
        uint64_t b = base;
1551
        uint64_t res, d = 1;
1551
        uint64_t res, d = 1;
1552
        uint32_t high = rem >> 32;
1552
        uint32_t high = rem >> 32;
1553
 
1553
 
1554
        /* Reduce the thing a bit first */
1554
        /* Reduce the thing a bit first */
1555
        res = 0;
1555
        res = 0;
1556
        if (high >= base) {
1556
        if (high >= base) {
1557
                high /= base;
1557
                high /= base;
1558
                res = (uint64_t) high << 32;
1558
                res = (uint64_t) high << 32;
1559
                rem -= (uint64_t) (high*base) << 32;
1559
                rem -= (uint64_t) (high*base) << 32;
1560
        }
1560
        }
1561
 
1561
 
1562
        while ((int64_t)b > 0 && b < rem) {
1562
        while ((int64_t)b > 0 && b < rem) {
1563
                b = b+b;
1563
                b = b+b;
1564
                d = d+d;
1564
                d = d+d;
1565
        }
1565
        }
1566
 
1566
 
1567
        do {
1567
        do {
1568
                if (rem >= b) {
1568
                if (rem >= b) {
1569
                        rem -= b;
1569
                        rem -= b;
1570
                        res += d;
1570
                        res += d;
1571
                }
1571
                }
1572
                b >>= 1;
1572
                b >>= 1;
1573
                d >>= 1;
1573
                d >>= 1;
1574
        } while (d);
1574
        } while (d);
1575
 
1575
 
1576
        *n = res;
1576
        *n = res;
1577
        return rem;
1577
        return rem;
1578
}
1578
}
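
/*
 * Usage sketch for __div64_32() above (editor's illustration, helper name is
 * hypothetical): the function divides *n in place by a 32-bit divisor and
 * returns the 32-bit remainder, the contract expected by do_div()-style
 * callers.
 */
static inline uint32_t example_div64_usage(void)
{
	uint64_t n = 10000000000ULL;		/* 10^10                        */
	uint32_t rem = __div64_32(&n, 1000);	/* n becomes 10000000, rem is 0 */
	return rem;
}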
1579
 
1579
 
1580
static struct pci_device_id pciidlist[] = {
1580
static struct pci_device_id pciidlist[] = {
1581
    radeon_PCI_IDS
1581
    radeon_PCI_IDS
1582
};
1582
};
1583
 
1583
 
1584
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
1584
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
1585
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
1585
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
1586
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
1586
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
1587
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
1587
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
1588
 
1588
 
1589
 
1589
 
1590
static struct drm_driver kms_driver = {
1590
static struct drm_driver kms_driver = {
1591
    .driver_features =
1591
    .driver_features =
1592
        DRIVER_USE_AGP |
1592
        DRIVER_USE_AGP |
1593
        DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
1593
        DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
1594
        DRIVER_PRIME | DRIVER_RENDER,
1594
        DRIVER_PRIME | DRIVER_RENDER,
1595
    .load = radeon_driver_load_kms,
1595
    .load = radeon_driver_load_kms,
1596
//    .open = radeon_driver_open_kms,
1596
//    .open = radeon_driver_open_kms,
1597
//    .preclose = radeon_driver_preclose_kms,
1597
//    .preclose = radeon_driver_preclose_kms,
1598
//    .postclose = radeon_driver_postclose_kms,
1598
//    .postclose = radeon_driver_postclose_kms,
1599
//    .lastclose = radeon_driver_lastclose_kms,
1599
//    .lastclose = radeon_driver_lastclose_kms,
1600
//    .unload = radeon_driver_unload_kms,
1600
//    .unload = radeon_driver_unload_kms,
1601
//    .get_vblank_counter = radeon_get_vblank_counter_kms,
1601
//    .get_vblank_counter = radeon_get_vblank_counter_kms,
1602
//    .enable_vblank = radeon_enable_vblank_kms,
1602
//    .enable_vblank = radeon_enable_vblank_kms,
1603
//    .disable_vblank = radeon_disable_vblank_kms,
1603
//    .disable_vblank = radeon_disable_vblank_kms,
1604
//    .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
1604
//    .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
1605
//    .get_scanout_position = radeon_get_crtc_scanoutpos,
1605
//    .get_scanout_position = radeon_get_crtc_scanoutpos,
1606
#if defined(CONFIG_DEBUG_FS)
1606
#if defined(CONFIG_DEBUG_FS)
1607
    .debugfs_init = radeon_debugfs_init,
1607
    .debugfs_init = radeon_debugfs_init,
1608
    .debugfs_cleanup = radeon_debugfs_cleanup,
1608
    .debugfs_cleanup = radeon_debugfs_cleanup,
1609
#endif
1609
#endif
1610
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
1610
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
1611
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
1611
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
1612
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
1612
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
1613
    .irq_handler = radeon_driver_irq_handler_kms,
1613
    .irq_handler = radeon_driver_irq_handler_kms,
1614
//    .ioctls = radeon_ioctls_kms,
1614
//    .ioctls = radeon_ioctls_kms,
1615
//    .gem_free_object = radeon_gem_object_free,
1615
//    .gem_free_object = radeon_gem_object_free,
1616
//    .gem_open_object = radeon_gem_object_open,
1616
//    .gem_open_object = radeon_gem_object_open,
1617
//    .gem_close_object = radeon_gem_object_close,
1617
//    .gem_close_object = radeon_gem_object_close,
1618
//    .dumb_create = radeon_mode_dumb_create,
1618
//    .dumb_create = radeon_mode_dumb_create,
1619
//    .dumb_map_offset = radeon_mode_dumb_mmap,
1619
//    .dumb_map_offset = radeon_mode_dumb_mmap,
1620
//    .dumb_destroy = drm_gem_dumb_destroy,
1620
//    .dumb_destroy = drm_gem_dumb_destroy,
1621
//    .fops = &radeon_driver_kms_fops,
1621
//    .fops = &radeon_driver_kms_fops,
1622
 
1622
 
1623
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1623
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1624
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1624
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1625
//    .gem_prime_export = drm_gem_prime_export,
1625
//    .gem_prime_export = drm_gem_prime_export,
1626
//    .gem_prime_import = drm_gem_prime_import,
1626
//    .gem_prime_import = drm_gem_prime_import,
1627
//    .gem_prime_pin = radeon_gem_prime_pin,
1627
//    .gem_prime_pin = radeon_gem_prime_pin,
1628
//    .gem_prime_unpin = radeon_gem_prime_unpin,
1628
//    .gem_prime_unpin = radeon_gem_prime_unpin,
1629
//    .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
1629
//    .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
1630
//    .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
1630
//    .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
1631
//    .gem_prime_vmap = radeon_gem_prime_vmap,
1631
//    .gem_prime_vmap = radeon_gem_prime_vmap,
1632
//    .gem_prime_vunmap = radeon_gem_prime_vunmap,
1632
//    .gem_prime_vunmap = radeon_gem_prime_vunmap,
1633
 
1633
 
1634
};
1634
};
1635
 
1635
 
1636
int ati_init(void)
1636
int ati_init(void)
1637
{
1637
{
1638
    static pci_dev_t device;
1638
    static pci_dev_t device;
1639
    const struct pci_device_id  *ent;
1639
    const struct pci_device_id  *ent;
1640
    int  err;
1640
    int  err;
1641
 
1641
 
1642
    ent = find_pci_device(&device, pciidlist);
1642
    ent = find_pci_device(&device, pciidlist);
1643
    if( unlikely(ent == NULL) )
1643
    if( unlikely(ent == NULL) )
1644
    {
1644
    {
1645
        dbgprintf("device not found\n");
1645
        dbgprintf("device not found\n");
1646
        return -ENODEV;
1646
        return -ENODEV;
1647
    }
1647
    }
1648
 
1648
 
1649
    drm_core_init();
1649
    drm_core_init();
1650
 
1650
 
1651
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
1651
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
1652
                                device.pci_dev.device);
1652
                                device.pci_dev.device);
1653
 
1653
 
1654
    kms_driver.driver_features |= DRIVER_MODESET;
1654
    kms_driver.driver_features |= DRIVER_MODESET;
1655
 
1655
 
1656
    err = drm_get_pci_dev(&device.pci_dev, ent, &kms_driver);
1656
    err = drm_get_pci_dev(&device.pci_dev, ent, &kms_driver);
1657
 
1657
 
1658
    return err;
1658
    return err;
1659
}
1659
}