Subversion Repositories Kolibri OS

Rev 5271 | Rev 5346
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include 
#include 
#include 
#include 
#include 
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

#include "bitmap.h"
#include "display.h"


#include 

#define PCI_VENDOR_ID_ATI               0x1002
#define PCI_VENDOR_ID_APPLE             0x106b

int radeon_no_wb;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
int radeon_gart_size = -1; /* auto */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_audio = -1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
int radeon_fastfb = 0;
int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
int radeon_vm_size = 8;
int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
int irq_override = 0;
int radeon_bapm = -1;
int radeon_backlight = 0;

extern display_t *os_display;
extern struct drm_device *main_device;
extern videomode_t usermode;


void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct drm_device *dev, videomode_t *usermode);

int get_modes(videomode_t *mode, u32 *count);
int set_user_mode(videomode_t *mode);
int r100_2D_test(struct radeon_device *rdev);


 /* Legacy VGA regions */
#define VGA_RSRC_NONE          0x00
#define VGA_RSRC_LEGACY_IO     0x01
#define VGA_RSRC_LEGACY_MEM    0x02
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO     0x04
#define VGA_RSRC_NORMAL_MEM    0x08


static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};

#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)

struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* macbook pro 8.2 */
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
	{ 0, 0, 0, 0, 0 },
};

bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}

static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;
}

/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
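
/*
 * Usage sketch (illustrative, not part of this file): golden register
 * tables are flat arrays of {offset, and_mask, or_mask} triples that the
 * per-ASIC init code hands to radeon_program_register_sequence().  The
 * table and helper below are hypothetical; real tables live in the
 * ASIC-specific files.
 */
#if 0
static const u32 example_golden_registers[] =
{
	/* reg,  and_mask,   or_mask */
	0x31e8, 0xffffffff, 0x00000001,
	0x3538, 0x0000ffff, 0x00000100,
};

static void example_init_golden_registers(struct radeon_device *rdev)
{
	radeon_program_register_sequence(rdev, example_golden_registers,
					 (const u32)ARRAY_SIZE(example_golden_registers));
}
#endif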

void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}

/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
    /* FIXME: check this out */
    if (rdev->family < CHIP_R600) {
        int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
    }
}

/*
 * GPU scratch register helper functions.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (r1xx-r5xx)
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
    int i;

    /* FIXME: check this out */
    if (rdev->family < CHIP_R300) {
        rdev->scratch.num_reg = 5;
    } else {
        rdev->scratch.num_reg = 7;
    }
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    for (i = 0; i < rdev->scratch.num_reg; i++) {
        rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    }
}

/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}
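
/*
 * Usage sketch (illustrative, not part of this file): the ring tests
 * allocate a scratch register, seed it with a magic value, ask the CP to
 * overwrite it and then poll it.  This mirrors the r100/r600 ring-test
 * pattern; the helper below is hypothetical.
 */
#if 0
static int example_scratch_roundtrip(struct radeon_device *rdev)
{
	uint32_t scratch;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r)
		return r;
	WREG32(scratch, 0xCAFEDEAD);
	/* ... emit a CP packet that writes 0xDEADBEEF to 'scratch',
	 * then poll RREG32(scratch) until it changes or a timeout hits ... */
	radeon_scratch_free(rdev, scratch);
	return 0;
}
#endif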

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

/**
 * radeon_doorbell_get - Allocate a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Allocate a doorbell for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics)
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}
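
/*
 * Usage sketch (illustrative, not part of this file): on CIK parts the
 * compute ring init grabs a doorbell slot, remembers its index in the ring
 * structure, and releases it again on teardown.  Roughly:
 */
#if 0
	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
	if (r)
		return r;
	/* ... program the ring's doorbell offset into its MQD ... */

	/* on fini: */
	radeon_doorbell_free(rdev, ring->doorbell_index);
#endif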

/**
 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup KFD
 *
 * @rdev: radeon_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for radeon.
 *
 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
 * takes doorbells required for its own rings and reports the setup to KFD.
 * Radeon reserved doorbells are at the start of the doorbell aperture.
 */
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/* The first num_doorbells are used by radeon.
	 * KFD takes whatever's left in the aperture. */
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = rdev->doorbell.base;
		*aperture_size = rdev->doorbell.size;
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics).  Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes Writeback and allocates the Writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
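
/*
 * Usage sketch (illustrative, not part of this file): with writeback
 * enabled, ring code fetches the hardware read pointer from the shared
 * page rather than doing an MMIO read.  'ring_rptr_reg' below is a
 * hypothetical stand-in; the actual register differs per ASIC.
 */
#if 0
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs / 4];	/* CPU-visible copy kept fresh by the GPU */
	else
		rptr = RREG32(ring_rptr_reg);			/* MMIO fallback */
#endif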

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and lots of
 * Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
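
/*
 * Worked example (illustrative): with 256 MB of VRAM placed at base 0,
 * vram_start = 0x00000000 and vram_end = 0x0FFFFFFF.  A 512 MB GTT then
 * does not fit "before" VRAM (size_bf = 0), so it is placed right after
 * it: gtt_start = 0x10000000, gtt_end = 0x2FFFFFFF, assuming
 * gtt_base_align does not push the start address further up.
 */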

/*
 * GPU helper functions.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;

	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
 * atombios.h, and atom.c
 */
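
/*
 * Sketch (illustrative): the cail_* callbacks below are collected into a
 * struct card_info and handed to atom_parse() together with the vbios
 * image, roughly the way radeon_atombios_init() wires them up later in
 * the driver:
 */
#if 0
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;
	/* IO register accessors are only hooked up when a PCI IO BAR is present */
	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
#endif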
852
 
852
 
853
/**
853
/**
854
 * cail_pll_read - read PLL register
854
 * cail_pll_read - read PLL register
855
 *
855
 *
856
 * @info: atom card_info pointer
856
 * @info: atom card_info pointer
857
 * @reg: PLL register offset
857
 * @reg: PLL register offset
858
 *
858
 *
859
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
859
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
860
 * Returns the value of the PLL register.
860
 * Returns the value of the PLL register.
861
 */
861
 */
862
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
862
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
863
{
863
{
864
    struct radeon_device *rdev = info->dev->dev_private;
864
    struct radeon_device *rdev = info->dev->dev_private;
865
    uint32_t r;
865
    uint32_t r;
866
 
866
 
867
    r = rdev->pll_rreg(rdev, reg);
867
    r = rdev->pll_rreg(rdev, reg);
868
    return r;
868
    return r;
869
}
869
}
870
 
870
 
871
/**
871
/**
872
 * cail_pll_write - write PLL register
872
 * cail_pll_write - write PLL register
873
 *
873
 *
874
 * @info: atom card_info pointer
874
 * @info: atom card_info pointer
875
 * @reg: PLL register offset
875
 * @reg: PLL register offset
876
 * @val: value to write to the pll register
876
 * @val: value to write to the pll register
877
 *
877
 *
878
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
878
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
879
 */
879
 */
880
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
880
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
881
{
881
{
882
    struct radeon_device *rdev = info->dev->dev_private;
882
    struct radeon_device *rdev = info->dev->dev_private;
883
 
883
 
884
    rdev->pll_wreg(rdev, reg, val);
884
    rdev->pll_wreg(rdev, reg, val);
885
}
885
}
886
 
886
 
887
/**
887
/**
888
 * cail_mc_read - read MC (Memory Controller) register
888
 * cail_mc_read - read MC (Memory Controller) register
889
 *
889
 *
890
 * @info: atom card_info pointer
890
 * @info: atom card_info pointer
891
 * @reg: MC register offset
891
 * @reg: MC register offset
892
 *
892
 *
893
 * Provides an MC register accessor for the atom interpreter (r4xx+).
893
 * Provides an MC register accessor for the atom interpreter (r4xx+).
894
 * Returns the value of the MC register.
894
 * Returns the value of the MC register.
895
 */
895
 */
896
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
896
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
897
{
897
{
898
    struct radeon_device *rdev = info->dev->dev_private;
898
    struct radeon_device *rdev = info->dev->dev_private;
899
    uint32_t r;
899
    uint32_t r;
900
 
900
 
901
    r = rdev->mc_rreg(rdev, reg);
901
    r = rdev->mc_rreg(rdev, reg);
902
    return r;
902
    return r;
903
}
903
}
904
 
904
 
905
/**
905
/**
906
 * cail_mc_write - write MC (Memory Controller) register
906
 * cail_mc_write - write MC (Memory Controller) register
907
 *
907
 *
908
 * @info: atom card_info pointer
908
 * @info: atom card_info pointer
909
 * @reg: MC register offset
909
 * @reg: MC register offset
910
 * @val: value to write to the pll register
910
 * @val: value to write to the pll register
911
 *
911
 *
912
 * Provides a MC register accessor for the atom interpreter (r4xx+).
912
 * Provides a MC register accessor for the atom interpreter (r4xx+).
913
 */
913
 */
914
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
914
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
915
{
915
{
916
    struct radeon_device *rdev = info->dev->dev_private;
916
    struct radeon_device *rdev = info->dev->dev_private;
917
 
917
 
918
    rdev->mc_wreg(rdev, reg, val);
918
    rdev->mc_wreg(rdev, reg, val);
919
}
919
}
920
 
920
 
921
/**
921
/**
922
 * cail_reg_write - write MMIO register
922
 * cail_reg_write - write MMIO register
923
 *
923
 *
924
 * @info: atom card_info pointer
924
 * @info: atom card_info pointer
925
 * @reg: MMIO register offset
925
 * @reg: MMIO register offset
926
 * @val: value to write to the MMIO register
926
 * @val: value to write to the MMIO register
927
 *
927
 *
928
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
928
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
929
 */
929
 */
930
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
930
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
931
{
931
{
932
    struct radeon_device *rdev = info->dev->dev_private;
932
    struct radeon_device *rdev = info->dev->dev_private;
933
 
933
 
934
    WREG32(reg*4, val);
934
    WREG32(reg*4, val);
935
}
935
}
936
 
936
 
937
/**
937
/**
938
 * cail_reg_read - read MMIO register
938
 * cail_reg_read - read MMIO register
939
 *
939
 *
940
 * @info: atom card_info pointer
940
 * @info: atom card_info pointer
941
 * @reg: MMIO register offset
941
 * @reg: MMIO register offset
942
 *
942
 *
943
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
943
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
944
 * Returns the value of the MMIO register.
944
 * Returns the value of the MMIO register.
945
 */
945
 */
946
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
946
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
947
{
947
{
948
    struct radeon_device *rdev = info->dev->dev_private;
948
    struct radeon_device *rdev = info->dev->dev_private;
949
    uint32_t r;
949
    uint32_t r;
950
 
950
 
951
    r = RREG32(reg*4);
951
    r = RREG32(reg*4);
952
    return r;
952
    return r;
953
}
953
}
954
 
954
 
955
/**
955
/**
956
 * cail_ioreg_write - write IO register
956
 * cail_ioreg_write - write IO register
957
 *
957
 *
958
 * @info: atom card_info pointer
958
 * @info: atom card_info pointer
959
 * @reg: IO register offset
959
 * @reg: IO register offset
960
 * @val: value to write to the IO register
960
 * @val: value to write to the IO register
961
 *
961
 *
962
 * Provides an IO register accessor for the atom interpreter (r4xx+).
962
 * Provides an IO register accessor for the atom interpreter (r4xx+).
963
 */
963
 */
964
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
964
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
965
{
965
{
966
	struct radeon_device *rdev = info->dev->dev_private;
966
	struct radeon_device *rdev = info->dev->dev_private;
967
 
967
 
968
	WREG32_IO(reg*4, val);
968
	WREG32_IO(reg*4, val);
969
}
969
}
970
 
970
 
971
/**
971
/**
972
 * cail_ioreg_read - read IO register
972
 * cail_ioreg_read - read IO register
973
 *
973
 *
974
 * @info: atom card_info pointer
974
 * @info: atom card_info pointer
975
 * @reg: IO register offset
975
 * @reg: IO register offset
976
 *
976
 *
977
 * Provides an IO register accessor for the atom interpreter (r4xx+).
977
 * Provides an IO register accessor for the atom interpreter (r4xx+).
978
 * Returns the value of the IO register.
978
 * Returns the value of the IO register.
979
 */
979
 */
980
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
980
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
981
{
981
{
982
	struct radeon_device *rdev = info->dev->dev_private;
982
	struct radeon_device *rdev = info->dev->dev_private;
983
	uint32_t r;
983
	uint32_t r;
984
 
984
 
985
	r = RREG32_IO(reg*4);
985
	r = RREG32_IO(reg*4);
986
	return r;
986
	return r;
987
}
987
}
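
/*
 * Note: the ATOM interpreter appears to hand these callbacks register
 * offsets expressed in 32-bit dwords (an assumption, not stated in this
 * file), which is why the MMIO and IO accessors above scale the offset with
 * "reg * 4" before calling RREG32/WREG32 and RREG32_IO/WREG32_IO, which take
 * byte offsets.  A minimal sketch of the convention:
 *
 *     uint32_t dword_index = 0x10;             // index supplied by ATOM
 *     uint32_t byte_offset = dword_index * 4;  // 0x40, as passed to RREG32()
 */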
988
 
988
 
989
/**
989
/**
990
 * radeon_atombios_init - init the driver info and callbacks for atombios
990
 * radeon_atombios_init - init the driver info and callbacks for atombios
991
 *
991
 *
992
 * @rdev: radeon_device pointer
992
 * @rdev: radeon_device pointer
993
 *
993
 *
994
 * Initializes the driver info and register access callbacks for the
994
 * Initializes the driver info and register access callbacks for the
995
 * ATOM interpreter (r4xx+).
995
 * ATOM interpreter (r4xx+).
996
 * Returns 0 on success, -ENOMEM on failure.
996
 * Returns 0 on success, -ENOMEM on failure.
997
 * Called at driver startup.
997
 * Called at driver startup.
998
 */
998
 */
999
int radeon_atombios_init(struct radeon_device *rdev)
999
int radeon_atombios_init(struct radeon_device *rdev)
1000
{
1000
{
1001
	struct card_info *atom_card_info =
1001
	struct card_info *atom_card_info =
1002
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
1002
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
1003
 
1003
 
1004
	if (!atom_card_info)
1004
	if (!atom_card_info)
1005
		return -ENOMEM;
1005
		return -ENOMEM;
1006
 
1006
 
1007
	rdev->mode_info.atom_card_info = atom_card_info;
1007
	rdev->mode_info.atom_card_info = atom_card_info;
1008
	atom_card_info->dev = rdev->ddev;
1008
	atom_card_info->dev = rdev->ddev;
1009
	atom_card_info->reg_read = cail_reg_read;
1009
	atom_card_info->reg_read = cail_reg_read;
1010
	atom_card_info->reg_write = cail_reg_write;
1010
	atom_card_info->reg_write = cail_reg_write;
1011
	/* needed for iio ops */
1011
	/* needed for iio ops */
1012
	if (rdev->rio_mem) {
1012
	if (rdev->rio_mem) {
1013
		atom_card_info->ioreg_read = cail_ioreg_read;
1013
		atom_card_info->ioreg_read = cail_ioreg_read;
1014
		atom_card_info->ioreg_write = cail_ioreg_write;
1014
		atom_card_info->ioreg_write = cail_ioreg_write;
1015
	} else {
1015
	} else {
1016
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
1016
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
1017
		atom_card_info->ioreg_read = cail_reg_read;
1017
		atom_card_info->ioreg_read = cail_reg_read;
1018
		atom_card_info->ioreg_write = cail_reg_write;
1018
		atom_card_info->ioreg_write = cail_reg_write;
1019
	}
1019
	}
1020
	atom_card_info->mc_read = cail_mc_read;
1020
	atom_card_info->mc_read = cail_mc_read;
1021
	atom_card_info->mc_write = cail_mc_write;
1021
	atom_card_info->mc_write = cail_mc_write;
1022
	atom_card_info->pll_read = cail_pll_read;
1022
	atom_card_info->pll_read = cail_pll_read;
1023
	atom_card_info->pll_write = cail_pll_write;
1023
	atom_card_info->pll_write = cail_pll_write;
1024
 
1024
 
1025
	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
1025
	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
1026
	if (!rdev->mode_info.atom_context) {
1026
	if (!rdev->mode_info.atom_context) {
1027
		radeon_atombios_fini(rdev);
1027
		radeon_atombios_fini(rdev);
1028
		return -ENOMEM;
1028
		return -ENOMEM;
1029
	}
1029
	}
1030
 
1030
 
1031
	mutex_init(&rdev->mode_info.atom_context->mutex);
1031
	mutex_init(&rdev->mode_info.atom_context->mutex);
1032
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
1032
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
1033
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
1033
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
1034
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
1034
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
1035
    return 0;
1035
    return 0;
1036
}
1036
}
1037
 
1037
 
1038
/**
1038
/**
1039
 * radeon_atombios_fini - free the driver info and callbacks for atombios
1039
 * radeon_atombios_fini - free the driver info and callbacks for atombios
1040
 *
1040
 *
1041
 * @rdev: radeon_device pointer
1041
 * @rdev: radeon_device pointer
1042
 *
1042
 *
1043
 * Frees the driver info and register access callbacks for the ATOM
1043
 * Frees the driver info and register access callbacks for the ATOM
1044
 * interpreter (r4xx+).
1044
 * interpreter (r4xx+).
1045
 * Called at driver shutdown.
1045
 * Called at driver shutdown.
1046
 */
1046
 */
1047
void radeon_atombios_fini(struct radeon_device *rdev)
1047
void radeon_atombios_fini(struct radeon_device *rdev)
1048
{
1048
{
1049
	if (rdev->mode_info.atom_context) {
1049
	if (rdev->mode_info.atom_context) {
1050
		kfree(rdev->mode_info.atom_context->scratch);
1050
		kfree(rdev->mode_info.atom_context->scratch);
1051
	}
1051
	}
1052
	kfree(rdev->mode_info.atom_context);
1052
	kfree(rdev->mode_info.atom_context);
1053
	rdev->mode_info.atom_context = NULL;
1053
	rdev->mode_info.atom_context = NULL;
1054
	kfree(rdev->mode_info.atom_card_info);
1054
	kfree(rdev->mode_info.atom_card_info);
1055
	rdev->mode_info.atom_card_info = NULL;
1055
	rdev->mode_info.atom_card_info = NULL;
1056
}
1056
}
1057
 
1057
 
1058
/* COMBIOS */
1058
/* COMBIOS */
1059
/*
1059
/*
1060
 * COMBIOS is the bios format prior to ATOM. It provides
1060
 * COMBIOS is the bios format prior to ATOM. It provides
1061
 * command tables similar to ATOM, but doesn't have a unified
1061
 * command tables similar to ATOM, but doesn't have a unified
1062
 * parser.  See radeon_combios.c
1062
 * parser.  See radeon_combios.c
1063
 */
1063
 */
1064
 
1064
 
1065
/**
1065
/**
1066
 * radeon_combios_init - init the driver info for combios
1066
 * radeon_combios_init - init the driver info for combios
1067
 *
1067
 *
1068
 * @rdev: radeon_device pointer
1068
 * @rdev: radeon_device pointer
1069
 *
1069
 *
1070
 * Initializes the driver info for combios (r1xx-r3xx).
1070
 * Initializes the driver info for combios (r1xx-r3xx).
1071
 * Returns 0 on success.
1071
 * Returns 0 on success.
1072
 * Called at driver startup.
1072
 * Called at driver startup.
1073
 */
1073
 */
1074
int radeon_combios_init(struct radeon_device *rdev)
1074
int radeon_combios_init(struct radeon_device *rdev)
1075
{
1075
{
1076
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1076
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1077
	return 0;
1077
	return 0;
1078
}
1078
}
1079
 
1079
 
1080
/**
1080
/**
1081
 * radeon_combios_fini - free the driver info for combios
1081
 * radeon_combios_fini - free the driver info for combios
1082
 *
1082
 *
1083
 * @rdev: radeon_device pointer
1083
 * @rdev: radeon_device pointer
1084
 *
1084
 *
1085
 * Frees the driver info for combios (r1xx-r3xx).
1085
 * Frees the driver info for combios (r1xx-r3xx).
1086
 * Called at driver shutdown.
1086
 * Called at driver shutdown.
1087
 */
1087
 */
1088
void radeon_combios_fini(struct radeon_device *rdev)
1088
void radeon_combios_fini(struct radeon_device *rdev)
1089
{
1089
{
1090
}
1090
}
1091
 
1091
 
1092
/* if we get transitioned to only one device, take VGA back */
1092
/* if we get transitioned to only one device, take VGA back */
1093
/**
1093
/**
1094
 * radeon_vga_set_decode - enable/disable vga decode
1094
 * radeon_vga_set_decode - enable/disable vga decode
1095
 *
1095
 *
1096
 * @cookie: radeon_device pointer
1096
 * @cookie: radeon_device pointer
1097
 * @state: enable/disable vga decode
1097
 * @state: enable/disable vga decode
1098
 *
1098
 *
1099
 * Enable/disable vga decode (all asics).
1099
 * Enable/disable vga decode (all asics).
1100
 * Returns VGA resource flags.
1100
 * Returns VGA resource flags.
1101
 */
1101
 */
1102
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1102
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1103
{
1103
{
1104
	struct radeon_device *rdev = cookie;
1104
	struct radeon_device *rdev = cookie;
1105
	radeon_vga_set_state(rdev, state);
1105
	radeon_vga_set_state(rdev, state);
1106
	if (state)
1106
	if (state)
1107
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1107
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1108
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1108
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1109
	else
1109
	else
1110
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1110
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1111
}
1111
}
1112
 
1112
 
1113
/**
1113
/**
1114
 * radeon_check_pot_argument - check that argument is a power of two
1114
 * radeon_check_pot_argument - check that argument is a power of two
1115
 *
1115
 *
1116
 * @arg: value to check
1116
 * @arg: value to check
1117
 *
1117
 *
1118
 * Validates that a certain argument is a power of two (all asics).
1118
 * Validates that a certain argument is a power of two (all asics).
1119
 * Returns true if argument is valid.
1119
 * Returns true if argument is valid.
1120
 */
1120
 */
1121
static bool radeon_check_pot_argument(int arg)
1121
static bool radeon_check_pot_argument(int arg)
1122
{
1122
{
1123
	return (arg & (arg - 1)) == 0;
1123
	return (arg & (arg - 1)) == 0;
1124
}
1124
}
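
/*
 * Worked example for the test above: a power of two has a single bit set, so
 * clearing its lowest set bit with (arg & (arg - 1)) yields zero.
 *
 *     512 = 0b1000000000, 511 = 0b0111111111  ->  512 & 511 == 0         (power of two)
 *     640 = 0b1010000000, 639 = 0b1001111111  ->  640 & 639 == 512 != 0  (rejected)
 *
 * Note that 0 also passes the test, which lets the callers below keep 0 as
 * the "no limit / use the default" value.
 */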
1125
 
1125
 
1126
/**
1126
/**
1127
 * radeon_check_arguments - validate module params
1127
 * radeon_check_arguments - validate module params
1128
 *
1128
 *
1129
 * @rdev: radeon_device pointer
1129
 * @rdev: radeon_device pointer
1130
 *
1130
 *
1131
 * Validates certain module parameters and updates
1131
 * Validates certain module parameters and updates
1132
 * the associated values used by the driver (all asics).
1132
 * the associated values used by the driver (all asics).
1133
 */
1133
 */
1134
static void radeon_check_arguments(struct radeon_device *rdev)
1134
static void radeon_check_arguments(struct radeon_device *rdev)
1135
{
1135
{
1136
	/* vramlimit must be a power of two */
1136
	/* vramlimit must be a power of two */
1137
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
1137
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
1138
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1138
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1139
				radeon_vram_limit);
1139
				radeon_vram_limit);
1140
		radeon_vram_limit = 0;
1140
		radeon_vram_limit = 0;
1141
	}
1141
	}
1142
 
1142
 
1143
	if (radeon_gart_size == -1) {
1143
	if (radeon_gart_size == -1) {
1144
		/* default to a larger gart size on newer asics */
1144
		/* default to a larger gart size on newer asics */
1145
		if (rdev->family >= CHIP_RV770)
1145
		if (rdev->family >= CHIP_RV770)
1146
			radeon_gart_size = 1024;
1146
			radeon_gart_size = 1024;
1147
		else
1147
		else
1148
			radeon_gart_size = 512;
1148
			radeon_gart_size = 512;
1149
	}
1149
	}
1150
	/* gtt size must be a power of two and greater than or equal to 32M */
1150
	/* gtt size must be a power of two and greater than or equal to 32M */
1151
	if (radeon_gart_size < 32) {
1151
	if (radeon_gart_size < 32) {
1152
		dev_warn(rdev->dev, "gart size (%d) too small\n",
1152
		dev_warn(rdev->dev, "gart size (%d) too small\n",
1153
				radeon_gart_size);
1153
				radeon_gart_size);
1154
		if (rdev->family >= CHIP_RV770)
1154
		if (rdev->family >= CHIP_RV770)
1155
			radeon_gart_size = 1024;
1155
			radeon_gart_size = 1024;
1156
		else
1156
		else
1157
			radeon_gart_size = 512;
1157
			radeon_gart_size = 512;
1158
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
1158
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
1159
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1159
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1160
				radeon_gart_size);
1160
				radeon_gart_size);
1161
		if (rdev->family >= CHIP_RV770)
1161
		if (rdev->family >= CHIP_RV770)
1162
			radeon_gart_size = 1024;
1162
			radeon_gart_size = 1024;
1163
		else
1163
		else
1164
			radeon_gart_size = 512;
1164
			radeon_gart_size = 512;
1165
	}
1165
	}
1166
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1166
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1167
 
1167
 
1168
	/* AGP mode can only be -1, 1, 2, 4, 8 */
1168
	/* AGP mode can only be -1, 1, 2, 4, 8 */
1169
	switch (radeon_agpmode) {
1169
	switch (radeon_agpmode) {
1170
	case -1:
1170
	case -1:
1171
	case 0:
1171
	case 0:
1172
	case 1:
1172
	case 1:
1173
	case 2:
1173
	case 2:
1174
	case 4:
1174
	case 4:
1175
	case 8:
1175
	case 8:
1176
		break;
1176
		break;
1177
	default:
1177
	default:
1178
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1178
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1179
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1179
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1180
		radeon_agpmode = 0;
1180
		radeon_agpmode = 0;
1181
		break;
1181
		break;
1182
	}
1182
	}
1183
 
1183
 
1184
	if (!radeon_check_pot_argument(radeon_vm_size)) {
1184
	if (!radeon_check_pot_argument(radeon_vm_size)) {
1185
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1185
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1186
			 radeon_vm_size);
1186
			 radeon_vm_size);
1187
		radeon_vm_size = 4;
1187
		radeon_vm_size = 4;
1188
	}
1188
	}
1189
 
1189
 
1190
	if (radeon_vm_size < 1) {
1190
	if (radeon_vm_size < 1) {
1191
		dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
1191
		dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
1192
			 radeon_vm_size);
1192
			 radeon_vm_size);
1193
		radeon_vm_size = 4;
1193
		radeon_vm_size = 4;
1194
	}
1194
	}
1195
 
1195
 
1196
       /*
1196
       /*
1197
        * Max GPUVM size for Cayman, SI and CI is 40 bits.
1197
        * Max GPUVM size for Cayman, SI and CI is 40 bits.
1198
        */
1198
        */
1199
	if (radeon_vm_size > 1024) {
1199
	if (radeon_vm_size > 1024) {
1200
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1200
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1201
			 radeon_vm_size);
1201
			 radeon_vm_size);
1202
		radeon_vm_size = 4;
1202
		radeon_vm_size = 4;
1203
	}
1203
	}
1204
 
1204
 
1205
	/* defines the number of bits in the page table versus the page directory;
1205
	/* defines the number of bits in the page table versus the page directory;
1206
	 * a page is 4KB, so we have a 12-bit offset, a minimum of 9 bits in the
1206
	 * a page is 4KB, so we have a 12-bit offset, a minimum of 9 bits in the
1207
	 * page table, and the remaining bits in the page directory */
1207
	 * page table, and the remaining bits in the page directory */
1208
	if (radeon_vm_block_size == -1) {
1208
	if (radeon_vm_block_size == -1) {
1209
 
1209
 
1210
		/* Total bits covered by PD + PTs */
1210
		/* Total bits covered by PD + PTs */
1211
		unsigned bits = ilog2(radeon_vm_size) + 18;
1211
		unsigned bits = ilog2(radeon_vm_size) + 18;
1212
 
1212
 
1213
		/* Make sure the PD is 4K in size up to 8GB address space.
1213
		/* Make sure the PD is 4K in size up to 8GB address space.
1214
		   Above that split equal between PD and PTs */
1214
		   Above that split equal between PD and PTs */
1215
		if (radeon_vm_size <= 8)
1215
		if (radeon_vm_size <= 8)
1216
			radeon_vm_block_size = bits - 9;
1216
			radeon_vm_block_size = bits - 9;
1217
		else
1217
		else
1218
			radeon_vm_block_size = (bits + 3) / 2;
1218
			radeon_vm_block_size = (bits + 3) / 2;
1219
 
1219
 
1220
	} else if (radeon_vm_block_size < 9) {
1220
	} else if (radeon_vm_block_size < 9) {
1221
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1221
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1222
			 radeon_vm_block_size);
1222
			 radeon_vm_block_size);
1223
		radeon_vm_block_size = 9;
1223
		radeon_vm_block_size = 9;
1224
	}
1224
	}
1225
 
1225
 
1226
	if (radeon_vm_block_size > 24 ||
1226
	if (radeon_vm_block_size > 24 ||
1227
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1227
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1228
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1228
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1229
			 radeon_vm_block_size);
1229
			 radeon_vm_block_size);
1230
		radeon_vm_block_size = 9;
1230
		radeon_vm_block_size = 9;
1231
	}
1231
	}
1232
}
1232
}
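
/*
 * Worked example of the VM block-size math above (illustration only; the
 * 8-byte page-directory-entry size is an assumption consistent with the
 * "PD is 4K" comment).  radeon_vm_size is in GB and a page is 4KB:
 *
 *     radeon_vm_size = 8   -> bits = ilog2(8) + 18 = 21
 *                             radeon_vm_block_size = 21 - 9 = 12
 *                             PD indexes 21 - 12 = 9 bits = 512 entries
 *                             512 entries * 8 bytes = 4KB page directory
 *
 *     radeon_vm_size = 64  -> bits = ilog2(64) + 18 = 24
 *                             radeon_vm_block_size = (24 + 3) / 2 = 13
 *                             PD indexes 24 - 13 = 11 bits (roughly an even split)
 */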
1233
 
1233
 
1234
/**
1234
/**
1235
 * radeon_device_init - initialize the driver
1235
 * radeon_device_init - initialize the driver
1236
 *
1236
 *
1237
 * @rdev: radeon_device pointer
1237
 * @rdev: radeon_device pointer
1238
 * @ddev: drm dev pointer
1238
 * @ddev: drm dev pointer
1239
 * @pdev: pci dev pointer
1239
 * @pdev: pci dev pointer
1240
 * @flags: driver flags
1240
 * @flags: driver flags
1241
 *
1241
 *
1242
 * Initializes the driver info and hw (all asics).
1242
 * Initializes the driver info and hw (all asics).
1243
 * Returns 0 for success or an error on failure.
1243
 * Returns 0 for success or an error on failure.
1244
 * Called at driver startup.
1244
 * Called at driver startup.
1245
 */
1245
 */
1246
int radeon_device_init(struct radeon_device *rdev,
1246
int radeon_device_init(struct radeon_device *rdev,
1247
               struct drm_device *ddev,
1247
               struct drm_device *ddev,
1248
               struct pci_dev *pdev,
1248
               struct pci_dev *pdev,
1249
               uint32_t flags)
1249
               uint32_t flags)
1250
{
1250
{
1251
	int r, i;
1251
	int r, i;
1252
	int dma_bits;
1252
	int dma_bits;
1253
	bool runtime = false;
1253
	bool runtime = false;
1254
 
1254
 
1255
    rdev->shutdown = false;
1255
    rdev->shutdown = false;
1256
	rdev->dev = &pdev->dev;
1256
	rdev->dev = &pdev->dev;
1257
    rdev->ddev = ddev;
1257
    rdev->ddev = ddev;
1258
    rdev->pdev = pdev;
1258
    rdev->pdev = pdev;
1259
    rdev->flags = flags;
1259
    rdev->flags = flags;
1260
    rdev->family = flags & RADEON_FAMILY_MASK;
1260
    rdev->family = flags & RADEON_FAMILY_MASK;
1261
    rdev->is_atom_bios = false;
1261
    rdev->is_atom_bios = false;
1262
    rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1262
    rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1263
	rdev->mc.gtt_size = 512 * 1024 * 1024;
1263
	rdev->mc.gtt_size = 512 * 1024 * 1024;
1264
	rdev->accel_working = false;
1264
	rdev->accel_working = false;
1265
	/* set up ring ids */
1265
	/* set up ring ids */
1266
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1266
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1267
		rdev->ring[i].idx = i;
1267
		rdev->ring[i].idx = i;
1268
	}
1268
	}
1269
	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
1269
	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
1270
 
1270
 
1271
	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1271
	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1272
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1272
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1273
		pdev->subsystem_vendor, pdev->subsystem_device);
1273
		pdev->subsystem_vendor, pdev->subsystem_device);
1274
 
1274
 
1275
    /* mutex initializations are all done here so we
1275
    /* mutex initializations are all done here so we
1276
     * can call these functions again without locking issues */
1276
     * can call these functions again without locking issues */
1277
	mutex_init(&rdev->ring_lock);
1277
	mutex_init(&rdev->ring_lock);
1278
	mutex_init(&rdev->dc_hw_i2c_mutex);
1278
	mutex_init(&rdev->dc_hw_i2c_mutex);
1279
	atomic_set(&rdev->ih.lock, 0);
1279
	atomic_set(&rdev->ih.lock, 0);
1280
	mutex_init(&rdev->gem.mutex);
1280
	mutex_init(&rdev->gem.mutex);
1281
	mutex_init(&rdev->pm.mutex);
1281
	mutex_init(&rdev->pm.mutex);
1282
	mutex_init(&rdev->gpu_clock_mutex);
1282
	mutex_init(&rdev->gpu_clock_mutex);
1283
	mutex_init(&rdev->srbm_mutex);
1283
	mutex_init(&rdev->srbm_mutex);
1284
	mutex_init(&rdev->grbm_idx_mutex);
1284
	mutex_init(&rdev->grbm_idx_mutex);
1285
 
-
 
1286
//   init_rwsem(&rdev->pm.mclk_lock);
1285
	init_rwsem(&rdev->pm.mclk_lock);
1287
//   init_rwsem(&rdev->exclusive_lock);
1286
	init_rwsem(&rdev->exclusive_lock);
1288
	init_waitqueue_head(&rdev->irq.vblank_queue);
1287
	init_waitqueue_head(&rdev->irq.vblank_queue);
1289
	mutex_init(&rdev->mn_lock);
1288
	mutex_init(&rdev->mn_lock);
1290
//	hash_init(rdev->mn_hash);
1289
//	hash_init(rdev->mn_hash);
1291
	r = radeon_gem_init(rdev);
1290
	r = radeon_gem_init(rdev);
1292
	if (r)
1291
	if (r)
1293
		return r;
1292
		return r;
1294
 
1293
 
1295
	radeon_check_arguments(rdev);
1294
	radeon_check_arguments(rdev);
1296
	/* Adjust VM size here.
1295
	/* Adjust VM size here.
1297
	 * Max GPUVM size for cayman+ is 40 bits.
1296
	 * Max GPUVM size for cayman+ is 40 bits.
1298
	 */
1297
	 */
1299
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1298
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1300
 
1299
 
1301
	/* Set asic functions */
1300
	/* Set asic functions */
1302
	r = radeon_asic_init(rdev);
1301
	r = radeon_asic_init(rdev);
1303
	if (r)
1302
	if (r)
1304
		return r;
1303
		return r;
1305
 
1304
 
1306
	/* all of the newer IGP chips have an internal gart.
1305
	/* all of the newer IGP chips have an internal gart.
1307
	 * However, some rs4xx report as AGP, so remove that here.
1306
	 * However, some rs4xx report as AGP, so remove that here.
1308
	 */
1307
	 */
1309
	if ((rdev->family >= CHIP_RS400) &&
1308
	if ((rdev->family >= CHIP_RS400) &&
1310
	    (rdev->flags & RADEON_IS_IGP)) {
1309
	    (rdev->flags & RADEON_IS_IGP)) {
1311
		rdev->flags &= ~RADEON_IS_AGP;
1310
		rdev->flags &= ~RADEON_IS_AGP;
1312
	}
1311
	}
1313
 
1312
 
1314
	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1313
	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1315
		radeon_agp_disable(rdev);
1314
		radeon_agp_disable(rdev);
1316
    }
1315
    }
1317
 
1316
 
1318
	/* Set the internal MC address mask
1317
	/* Set the internal MC address mask
1319
	 * This is the max address of the GPU's
1318
	 * This is the max address of the GPU's
1320
	 * internal address space.
1319
	 * internal address space.
1321
	 */
1320
	 */
1322
	if (rdev->family >= CHIP_CAYMAN)
1321
	if (rdev->family >= CHIP_CAYMAN)
1323
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1322
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1324
	else if (rdev->family >= CHIP_CEDAR)
1323
	else if (rdev->family >= CHIP_CEDAR)
1325
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1324
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1326
	else
1325
	else
1327
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1326
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1328
 
1327
 
1329
	/* set DMA mask + need_dma32 flags.
1328
	/* set DMA mask + need_dma32 flags.
1330
	 * PCIE - can handle 40-bits.
1329
	 * PCIE - can handle 40-bits.
1331
	 * IGP - can handle 40-bits
1330
	 * IGP - can handle 40-bits
1332
	 * AGP - generally dma32 is safest
1331
	 * AGP - generally dma32 is safest
1333
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1332
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1334
	 */
1333
	 */
1335
	rdev->need_dma32 = false;
1334
	rdev->need_dma32 = false;
1336
	if (rdev->flags & RADEON_IS_AGP)
1335
	if (rdev->flags & RADEON_IS_AGP)
1337
		rdev->need_dma32 = true;
1336
		rdev->need_dma32 = true;
1338
	if ((rdev->flags & RADEON_IS_PCI) &&
1337
	if ((rdev->flags & RADEON_IS_PCI) &&
1339
	    (rdev->family <= CHIP_RS740))
1338
	    (rdev->family <= CHIP_RS740))
1340
		rdev->need_dma32 = true;
1339
		rdev->need_dma32 = true;
1341
 
1340
 
1342
	dma_bits = rdev->need_dma32 ? 32 : 40;
1341
	dma_bits = rdev->need_dma32 ? 32 : 40;
1343
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1342
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1344
    if (r) {
1343
    if (r) {
1345
		rdev->need_dma32 = true;
1344
		rdev->need_dma32 = true;
1346
		dma_bits = 32;
1345
		dma_bits = 32;
1347
        printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1346
        printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1348
    }
1347
    }
1349
 
1348
 
1350
    /* Registers mapping */
1349
    /* Registers mapping */
1351
    /* TODO: block userspace mapping of io register */
1350
    /* TODO: block userspace mapping of io register */
1352
	spin_lock_init(&rdev->mmio_idx_lock);
1351
	spin_lock_init(&rdev->mmio_idx_lock);
1353
	spin_lock_init(&rdev->smc_idx_lock);
1352
	spin_lock_init(&rdev->smc_idx_lock);
1354
	spin_lock_init(&rdev->pll_idx_lock);
1353
	spin_lock_init(&rdev->pll_idx_lock);
1355
	spin_lock_init(&rdev->mc_idx_lock);
1354
	spin_lock_init(&rdev->mc_idx_lock);
1356
	spin_lock_init(&rdev->pcie_idx_lock);
1355
	spin_lock_init(&rdev->pcie_idx_lock);
1357
	spin_lock_init(&rdev->pciep_idx_lock);
1356
	spin_lock_init(&rdev->pciep_idx_lock);
1358
	spin_lock_init(&rdev->pif_idx_lock);
1357
	spin_lock_init(&rdev->pif_idx_lock);
1359
	spin_lock_init(&rdev->cg_idx_lock);
1358
	spin_lock_init(&rdev->cg_idx_lock);
1360
	spin_lock_init(&rdev->uvd_idx_lock);
1359
	spin_lock_init(&rdev->uvd_idx_lock);
1361
	spin_lock_init(&rdev->rcu_idx_lock);
1360
	spin_lock_init(&rdev->rcu_idx_lock);
1362
	spin_lock_init(&rdev->didt_idx_lock);
1361
	spin_lock_init(&rdev->didt_idx_lock);
1363
	spin_lock_init(&rdev->end_idx_lock);
1362
	spin_lock_init(&rdev->end_idx_lock);
1364
	if (rdev->family >= CHIP_BONAIRE) {
1363
	if (rdev->family >= CHIP_BONAIRE) {
1365
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1364
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1366
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1365
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1367
	} else {
1366
	} else {
1368
    rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1367
    rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1369
    rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1368
    rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1370
	}
1369
	}
1371
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1370
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1372
    if (rdev->rmmio == NULL) {
1371
    if (rdev->rmmio == NULL) {
1373
        return -ENOMEM;
1372
        return -ENOMEM;
1374
    }
1373
    }
1375
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1374
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1376
    DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1375
    DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1377
 
1376
 
1378
	/* doorbell bar mapping */
1377
	/* doorbell bar mapping */
1379
	if (rdev->family >= CHIP_BONAIRE)
1378
	if (rdev->family >= CHIP_BONAIRE)
1380
		radeon_doorbell_init(rdev);
1379
		radeon_doorbell_init(rdev);
1381
 
1380
 
1382
	/* io port mapping */
1381
	/* io port mapping */
1383
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1382
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1384
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1383
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1385
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1384
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1386
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1385
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1387
			break;
1386
			break;
1388
		}
1387
		}
1389
	}
1388
	}
1390
	if (rdev->rio_mem == NULL)
1389
	if (rdev->rio_mem == NULL)
1391
		DRM_ERROR("Unable to find PCI I/O BAR\n");
1390
		DRM_ERROR("Unable to find PCI I/O BAR\n");
1392
 
1391
 
1393
	if (rdev->flags & RADEON_IS_PX)
1392
	if (rdev->flags & RADEON_IS_PX)
1394
		radeon_device_handle_px_quirks(rdev);
1393
		radeon_device_handle_px_quirks(rdev);
1395
	if (rdev->flags & RADEON_IS_PX)
1394
	if (rdev->flags & RADEON_IS_PX)
1396
		runtime = true;
1395
		runtime = true;
1397
 
1396
 
1398
	r = radeon_init(rdev);
1397
	r = radeon_init(rdev);
1399
	if (r)
1398
	if (r)
1400
        return r;
1399
        return r;
1401
 
1400
 
1402
 
1401
 
1403
 
1402
 
1404
	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1403
	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1405
		/* Acceleration not working on AGP card try again
1404
		/* Acceleration not working on AGP card try again
1406
		 * with fallback to PCI or PCIE GART
1405
		 * with fallback to PCI or PCIE GART
1407
		 */
1406
		 */
1408
		radeon_asic_reset(rdev);
1407
		radeon_asic_reset(rdev);
1409
		radeon_fini(rdev);
1408
		radeon_fini(rdev);
1410
		radeon_agp_disable(rdev);
1409
		radeon_agp_disable(rdev);
1411
		r = radeon_init(rdev);
1410
		r = radeon_init(rdev);
1412
		if (r)
1411
		if (r)
1413
			return r;
1412
			return r;
1414
	}
1413
	}
1415
 
1414
 
1416
//   r = radeon_ib_ring_tests(rdev);
1415
//   r = radeon_ib_ring_tests(rdev);
1417
//   if (r)
1416
//   if (r)
1418
//       DRM_ERROR("ib ring test failed (%d).\n", r);
1417
//       DRM_ERROR("ib ring test failed (%d).\n", r);
1419
 
1418
 
1420
	if ((radeon_testing & 1)) {
1419
	if ((radeon_testing & 1)) {
1421
		if (rdev->accel_working)
1420
		if (rdev->accel_working)
1422
			radeon_test_moves(rdev);
1421
			radeon_test_moves(rdev);
1423
		else
1422
		else
1424
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1423
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1425
	}
1424
	}
1426
	if ((radeon_testing & 2)) {
1425
	if ((radeon_testing & 2)) {
1427
		if (rdev->accel_working)
1426
		if (rdev->accel_working)
1428
			radeon_test_syncing(rdev);
1427
			radeon_test_syncing(rdev);
1429
		else
1428
		else
1430
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1429
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1431
	}
1430
	}
1432
   if (radeon_benchmarking) {
1431
   if (radeon_benchmarking) {
1433
		if (rdev->accel_working)
1432
		if (rdev->accel_working)
1434
			radeon_benchmark(rdev, radeon_benchmarking);
1433
			radeon_benchmark(rdev, radeon_benchmarking);
1435
		else
1434
		else
1436
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1435
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1437
    }
1436
    }
1438
	return 0;
1437
	return 0;
1439
}
1438
}
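
/*
 * Summary of the init order implemented above: bookkeeping and
 * mutex/spinlock setup, GEM init, module-parameter validation
 * (radeon_check_arguments), ASIC function pointers, MC address mask and DMA
 * mask selection, MMIO (plus doorbell and IO BAR) mapping, then radeon_init().
 * If acceleration does not come up on an AGP card, the code retries once with
 * AGP disabled so the PCI/PCIE GART path can be used instead.
 */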
1440
 
1439
 
1441
/**
1440
/**
1442
 * radeon_gpu_reset - reset the asic
1441
 * radeon_gpu_reset - reset the asic
1443
 *
1442
 *
1444
 * @rdev: radeon device pointer
1443
 * @rdev: radeon device pointer
1445
 *
1444
 *
1446
 * Attempt to reset the GPU if it has hung (all asics).
1445
 * Attempt to reset the GPU if it has hung (all asics).
1447
 * Returns 0 for success or an error on failure.
1446
 * Returns 0 for success or an error on failure.
1448
 */
1447
 */
1449
int radeon_gpu_reset(struct radeon_device *rdev)
1448
int radeon_gpu_reset(struct radeon_device *rdev)
1450
{
1449
{
1451
    unsigned ring_sizes[RADEON_NUM_RINGS];
1450
    unsigned ring_sizes[RADEON_NUM_RINGS];
1452
    uint32_t *ring_data[RADEON_NUM_RINGS];
1451
    uint32_t *ring_data[RADEON_NUM_RINGS];
1453
 
1452
 
1454
    bool saved = false;
1453
    bool saved = false;
1455
 
1454
 
1456
    int i, r;
1455
    int i, r;
1457
    int resched;
1456
    int resched;
1458
 
1457
 
-
 
1458
	down_write(&rdev->exclusive_lock);
1459
//    down_write(&rdev->exclusive_lock);
1459
 
-
 
1460
	if (!rdev->needs_reset) {
-
 
1461
		up_write(&rdev->exclusive_lock);
-
 
1462
		return 0;
1460
	rdev->needs_reset = false;
1463
	}
1461
 
1464
 
1462
    radeon_save_bios_scratch_regs(rdev);
1465
    radeon_save_bios_scratch_regs(rdev);
1463
    /* block TTM */
1466
    /* block TTM */
1464
//    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1467
//    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1465
    radeon_suspend(rdev);
1468
    radeon_suspend(rdev);
1466
 
1469
 
1467
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1470
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1468
        ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1471
        ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1469
                           &ring_data[i]);
1472
                           &ring_data[i]);
1470
        if (ring_sizes[i]) {
1473
        if (ring_sizes[i]) {
1471
            saved = true;
1474
            saved = true;
1472
            dev_info(rdev->dev, "Saved %d dwords of commands "
1475
            dev_info(rdev->dev, "Saved %d dwords of commands "
1473
                 "on ring %d.\n", ring_sizes[i], i);
1476
                 "on ring %d.\n", ring_sizes[i], i);
1474
        }
1477
        }
1475
    }
1478
    }
1476
 
1479
 
1477
    r = radeon_asic_reset(rdev);
1480
    r = radeon_asic_reset(rdev);
1478
    if (!r) {
1481
    if (!r) {
1479
        dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1482
        dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1480
        radeon_resume(rdev);
1483
        radeon_resume(rdev);
1481
    }
1484
    }
1482
 
1485
 
1483
    radeon_restore_bios_scratch_regs(rdev);
1486
    radeon_restore_bios_scratch_regs(rdev);
1484
 
1487
 
1485
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1488
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1486
		if (!r && ring_data[i]) {
1489
		if (!r && ring_data[i]) {
1487
            radeon_ring_restore(rdev, &rdev->ring[i],
1490
            radeon_ring_restore(rdev, &rdev->ring[i],
1488
                        ring_sizes[i], ring_data[i]);
1491
                        ring_sizes[i], ring_data[i]);
1489
    } else {
1492
    } else {
1490
			radeon_fence_driver_force_completion(rdev, i);
1493
			radeon_fence_driver_force_completion(rdev, i);
1491
            kfree(ring_data[i]);
1494
            kfree(ring_data[i]);
1492
        }
1495
        }
1493
    }
1496
    }
1494
 
1497
 
1495
//    ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1498
//    ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1496
    if (r) {
1499
    if (r) {
1497
        /* bad news, how to tell it to userspace ? */
1500
        /* bad news, how to tell it to userspace ? */
1498
        dev_info(rdev->dev, "GPU reset failed\n");
1501
        dev_info(rdev->dev, "GPU reset failed\n");
1499
    }
1502
    }
-
 
1503
 
-
 
1504
	rdev->needs_reset = r == -EAGAIN;
-
 
1505
	rdev->in_reset = false;
1500
 
1506
 
1501
//    up_write(&rdev->exclusive_lock);
1507
	up_read(&rdev->exclusive_lock);
1502
    return r;
1508
    return r;
1503
}
1509
}
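
/*
 * Summary of the reset sequence above: save the BIOS scratch registers,
 * suspend the ASIC, back up any unprocessed ring contents, reset the ASIC,
 * resume on success, restore the scratch registers, then either replay the
 * saved ring data or force fence completion on rings that could not be
 * restored so that waiters are not left hanging.
 */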
1504
 
1510
 
1505
 
1511
 
1506
/*
1512
/*
1507
 * Driver load/unload
1513
 * Driver load/unload
1508
 */
1514
 */
1509
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
1515
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
1510
{
1516
{
1511
    struct radeon_device *rdev;
1517
    struct radeon_device *rdev;
1512
    int r;
1518
    int r;
1513
 
1519
 
1514
 
1520
 
1515
    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
1521
    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
1516
    if (rdev == NULL) {
1522
    if (rdev == NULL) {
1517
        return -ENOMEM;
1523
        return -ENOMEM;
1518
    }
1524
    }
1519
 
1525
 
1520
    dev->dev_private = (void *)rdev;
1526
    dev->dev_private = (void *)rdev;
1521
 
1527
 
1522
    /* update BUS flag */
1528
    /* update BUS flag */
1523
    if (drm_pci_device_is_agp(dev)) {
1529
    if (drm_pci_device_is_agp(dev)) {
1524
        flags |= RADEON_IS_AGP;
1530
        flags |= RADEON_IS_AGP;
1525
    } else if (drm_device_is_pcie(dev)) {
1531
    } else if (drm_device_is_pcie(dev)) {
1526
        flags |= RADEON_IS_PCIE;
1532
        flags |= RADEON_IS_PCIE;
1527
    } else {
1533
    } else {
1528
        flags |= RADEON_IS_PCI;
1534
        flags |= RADEON_IS_PCI;
1529
    }
1535
    }
1530
 
1536
 
1531
    /* radeon_device_init should report only fatal errors
1537
    /* radeon_device_init should report only fatal errors
1532
     * like memory allocation failure or iomapping failure,
1538
     * like memory allocation failure or iomapping failure,
1533
     * or memory manager initialization failure; it must
1539
     * or memory manager initialization failure; it must
1534
     * properly initialize the GPU MC controller and permit
1540
     * properly initialize the GPU MC controller and permit
1535
     * VRAM allocation
1541
     * VRAM allocation
1536
     */
1542
     */
1537
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
1543
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
1538
    if (r) {
1544
    if (r) {
1539
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
1545
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
1540
        return r;
1546
        return r;
1541
    }
1547
    }
1542
    /* Again, modeset_init should fail only on a fatal error;
1548
    /* Again, modeset_init should fail only on a fatal error;
1543
     * otherwise it should provide enough functionality
1549
     * otherwise it should provide enough functionality
1544
     * for shadowfb to run
1550
     * for shadowfb to run
1545
     */
1551
     */
1546
    main_device = dev;
1552
    main_device = dev;
1547
 
1553
 
1548
    if( radeon_modeset )
1554
    if( radeon_modeset )
1549
    {
1555
    {
1550
        r = radeon_modeset_init(rdev);
1556
        r = radeon_modeset_init(rdev);
1551
        if (r) {
1557
        if (r) {
1552
            return r;
1558
            return r;
1553
        }
1559
        }
1554
        init_display_kms(dev, &usermode);
1560
        init_display_kms(dev, &usermode);
1555
    }
1561
    }
1556
    else
1562
    else
1557
        init_display(rdev, &usermode);
1563
        init_display(rdev, &usermode);
1558
 
1564
 
1559
    return 0;
1565
    return 0;
1560
}
1566
}
1561
 
1567
 
1562
 
1568
 
1563
 
1569
 
1564
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
1570
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
1565
{
1571
{
1566
    return pci_resource_start(dev->pdev, resource);
1572
    return pci_resource_start(dev->pdev, resource);
1567
}
1573
}
1568
 
1574
 
1569
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
1575
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
1570
{
1576
{
1571
    return pci_resource_len(dev->pdev, resource);
1577
    return pci_resource_len(dev->pdev, resource);
1572
}
1578
}
1573
 
1579
 
1574
 
1580
 
1575
uint32_t __div64_32(uint64_t *n, uint32_t base)
1581
uint32_t __div64_32(uint64_t *n, uint32_t base)
1576
{
1582
{
1577
        uint64_t rem = *n;
1583
        uint64_t rem = *n;
1578
        uint64_t b = base;
1584
        uint64_t b = base;
1579
        uint64_t res, d = 1;
1585
        uint64_t res, d = 1;
1580
        uint32_t high = rem >> 32;
1586
        uint32_t high = rem >> 32;
1581
 
1587
 
1582
        /* Reduce the thing a bit first */
1588
        /* Reduce the thing a bit first */
1583
        res = 0;
1589
        res = 0;
1584
        if (high >= base) {
1590
        if (high >= base) {
1585
                high /= base;
1591
                high /= base;
1586
                res = (uint64_t) high << 32;
1592
                res = (uint64_t) high << 32;
1587
                rem -= (uint64_t) (high*base) << 32;
1593
                rem -= (uint64_t) (high*base) << 32;
1588
        }
1594
        }
1589
 
1595
 
1590
        while ((int64_t)b > 0 && b < rem) {
1596
        while ((int64_t)b > 0 && b < rem) {
1591
                b = b+b;
1597
                b = b+b;
1592
                d = d+d;
1598
                d = d+d;
1593
        }
1599
        }
1594
 
1600
 
1595
        do {
1601
        do {
1596
                if (rem >= b) {
1602
                if (rem >= b) {
1597
                        rem -= b;
1603
                        rem -= b;
1598
                        res += d;
1604
                        res += d;
1599
                }
1605
                }
1600
                b >>= 1;
1606
                b >>= 1;
1601
                d >>= 1;
1607
                d >>= 1;
1602
        } while (d);
1608
        } while (d);
1603
 
1609
 
1604
        *n = res;
1610
        *n = res;
1605
        return rem;
1611
        return rem;
1606
}
1612
}
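
/*
 * Usage sketch for __div64_32(): it divides *n by a 32-bit base in place
 * using shift-and-subtract long division (no 64-bit hardware divide needed)
 * and returns the remainder, for example:
 *
 *     uint64_t n = 10000000005ULL;
 *     uint32_t rem = __div64_32(&n, 1000000000);
 *     // now n == 10 and rem == 5
 */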
1607
 
1613
 
1608
static struct pci_device_id pciidlist[] = {
1614
static struct pci_device_id pciidlist[] = {
1609
    radeon_PCI_IDS
1615
    radeon_PCI_IDS
1610
};
1616
};
1611
 
1617
 
1612
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
1618
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
1613
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
1619
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
1614
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
1620
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
1615
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
1621
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
1616
 
1622
 
1617
 
1623
 
1618
static struct drm_driver kms_driver = {
1624
static struct drm_driver kms_driver = {
1619
    .driver_features =
1625
    .driver_features =
1620
        DRIVER_USE_AGP |
1626
        DRIVER_USE_AGP |
1621
        DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
1627
        DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
1622
        DRIVER_PRIME | DRIVER_RENDER,
1628
        DRIVER_PRIME | DRIVER_RENDER,
1623
    .load = radeon_driver_load_kms,
1629
    .load = radeon_driver_load_kms,
1624
//    .open = radeon_driver_open_kms,
1630
//    .open = radeon_driver_open_kms,
1625
//    .preclose = radeon_driver_preclose_kms,
1631
//    .preclose = radeon_driver_preclose_kms,
1626
//    .postclose = radeon_driver_postclose_kms,
1632
//    .postclose = radeon_driver_postclose_kms,
1627
//    .lastclose = radeon_driver_lastclose_kms,
1633
//    .lastclose = radeon_driver_lastclose_kms,
1628
//    .unload = radeon_driver_unload_kms,
1634
//    .unload = radeon_driver_unload_kms,
1629
//    .get_vblank_counter = radeon_get_vblank_counter_kms,
1635
//    .get_vblank_counter = radeon_get_vblank_counter_kms,
1630
//    .enable_vblank = radeon_enable_vblank_kms,
1636
//    .enable_vblank = radeon_enable_vblank_kms,
1631
//    .disable_vblank = radeon_disable_vblank_kms,
1637
//    .disable_vblank = radeon_disable_vblank_kms,
1632
//    .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
1638
//    .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
1633
//    .get_scanout_position = radeon_get_crtc_scanoutpos,
1639
//    .get_scanout_position = radeon_get_crtc_scanoutpos,
1634
#if defined(CONFIG_DEBUG_FS)
1640
#if defined(CONFIG_DEBUG_FS)
1635
    .debugfs_init = radeon_debugfs_init,
1641
    .debugfs_init = radeon_debugfs_init,
1636
    .debugfs_cleanup = radeon_debugfs_cleanup,
1642
    .debugfs_cleanup = radeon_debugfs_cleanup,
1637
#endif
1643
#endif
1638
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
1644
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
1639
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
1645
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
1640
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
1646
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
1641
    .irq_handler = radeon_driver_irq_handler_kms,
1647
    .irq_handler = radeon_driver_irq_handler_kms,
1642
//    .ioctls = radeon_ioctls_kms,
1648
//    .ioctls = radeon_ioctls_kms,
1643
//    .gem_free_object = radeon_gem_object_free,
1649
//    .gem_free_object = radeon_gem_object_free,
1644
//    .gem_open_object = radeon_gem_object_open,
1650
//    .gem_open_object = radeon_gem_object_open,
1645
//    .gem_close_object = radeon_gem_object_close,
1651
//    .gem_close_object = radeon_gem_object_close,
1646
//    .dumb_create = radeon_mode_dumb_create,
1652
//    .dumb_create = radeon_mode_dumb_create,
1647
//    .dumb_map_offset = radeon_mode_dumb_mmap,
1653
//    .dumb_map_offset = radeon_mode_dumb_mmap,
1648
//    .dumb_destroy = drm_gem_dumb_destroy,
1654
//    .dumb_destroy = drm_gem_dumb_destroy,
1649
//    .fops = &radeon_driver_kms_fops,
1655
//    .fops = &radeon_driver_kms_fops,
1650
 
1656
 
1651
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1657
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1652
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1658
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1653
//    .gem_prime_export = drm_gem_prime_export,
1659
//    .gem_prime_export = drm_gem_prime_export,
1654
//    .gem_prime_import = drm_gem_prime_import,
1660
//    .gem_prime_import = drm_gem_prime_import,
1655
//    .gem_prime_pin = radeon_gem_prime_pin,
1661
//    .gem_prime_pin = radeon_gem_prime_pin,
1656
//    .gem_prime_unpin = radeon_gem_prime_unpin,
1662
//    .gem_prime_unpin = radeon_gem_prime_unpin,
1657
//    .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
1663
//    .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
1658
//    .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
1664
//    .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
1659
//    .gem_prime_vmap = radeon_gem_prime_vmap,
1665
//    .gem_prime_vmap = radeon_gem_prime_vmap,
1660
//    .gem_prime_vunmap = radeon_gem_prime_vunmap,
1666
//    .gem_prime_vunmap = radeon_gem_prime_vunmap,
1661
 
1667
 
1662
};
1668
};
1663
 
1669
 
1664
int ati_init(void)
1670
int ati_init(void)
1665
{
1671
{
1666
    static pci_dev_t device;
1672
    static pci_dev_t device;
1667
    const struct pci_device_id  *ent;
1673
    const struct pci_device_id  *ent;
1668
    int  err;
1674
    int  err;
1669
 
1675
 
1670
    ent = find_pci_device(&device, pciidlist);
1676
    ent = find_pci_device(&device, pciidlist);
1671
    if( unlikely(ent == NULL) )
1677
    if( unlikely(ent == NULL) )
1672
    {
1678
    {
1673
        dbgprintf("device not found\n");
1679
        dbgprintf("device not found\n");
1674
        return -ENODEV;
1680
        return -ENODEV;
1675
    }
1681
    }
1676
 
1682
 
1677
    drm_core_init();
1683
    drm_core_init();
1678
 
1684
 
1679
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
1685
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
1680
                                device.pci_dev.device);
1686
                                device.pci_dev.device);
1681
 
1687
 
1682
    kms_driver.driver_features |= DRIVER_MODESET;
1688
    kms_driver.driver_features |= DRIVER_MODESET;
1683
 
1689
 
1684
    err = drm_get_pci_dev(&device.pci_dev, ent, &kms_driver);
1690
    err = drm_get_pci_dev(&device.pci_dev, ent, &kms_driver);
1685
 
1691
 
1686
    return err;
1692
    return err;
1687
}
1693
}