Subversion Repositories Kolibri OS

Rev 6938

Rev Author Line No. Line
1117 serge 1
/*
2
 * Copyright 2008 Advanced Micro Devices, Inc.
3
 * Copyright 2008 Red Hat Inc.
4
 * Copyright 2009 Jerome Glisse.
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * copy of this software and associated documentation files (the "Software"),
8
 * to deal in the Software without restriction, including without limitation
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10
 * and/or sell copies of the Software, and to permit persons to whom the
11
 * Software is furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
 * OTHER DEALINGS IN THE SOFTWARE.
23
 *
24
 * Authors: Dave Airlie
25
 *          Alex Deucher
26
 *          Jerome Glisse
27
 */
28
//#include 
2997 Serge 29
#include 
1179 serge 30
#include 
31
#include 
1221 serge 32
#include 
6104 serge 33
#include 
7146 serge 34
#include 
1117 serge 35
#include "radeon_reg.h"
36
#include "radeon.h"
37
#include "atom.h"
3120 serge 38
 
1428 serge 39
#include "display.h"
1117 serge 40
 
3120 serge 41
 
1221 serge 42
#include 
43
 
5078 serge 44
#define PCI_VENDOR_ID_ATI               0x1002
45
#define PCI_VENDOR_ID_APPLE             0x106b
1117 serge 46
 
5078 serge 47
int radeon_no_wb;
1430 serge 48
int radeon_modeset = -1;
49
int radeon_dynclks = -1;
50
int radeon_r4xx_atom = 0;
51
int radeon_agpmode = 0;
52
int radeon_vram_limit = 0;
5078 serge 53
int radeon_gart_size = -1; /* auto */
1430 serge 54
int radeon_benchmarking = 0;
55
int radeon_testing = 0;
56
int radeon_connector_table = 0;
57
int radeon_tv = 1;
5078 serge 58
int radeon_audio = -1;
59
int radeon_disp_priority = 0;
1963 serge 60
int radeon_hw_i2c = 0;
5078 serge 61
int radeon_pcie_gen2 = -1;
62
int radeon_msi = -1;
2997 Serge 63
int radeon_lockup_timeout = 10000;
3764 Serge 64
int radeon_fastfb = 0;
5078 serge 65
int radeon_dpm = -1;
66
int radeon_aspm = -1;
67
int radeon_runtime_pm = -1;
68
int radeon_hard_reset = 0;
69
int radeon_vm_size = 8;
70
int radeon_vm_block_size = -1;
71
int radeon_deep_color = 0;
72
int radeon_use_pflipirq = 2;
2160 serge 73
int irq_override = 0;
5078 serge 74
int radeon_bapm = -1;
5271 serge 75
int radeon_backlight = 0;
6104 serge 76
int radeon_auxch = -1;
77
int radeon_mst = 0;
6938 serge 78
 
5078 serge 79
extern display_t *os_display;
80
extern struct drm_device *main_device;
81
extern videomode_t usermode;
1246 serge 82
 
3120 serge 83
 
1404 serge 84
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
85
int init_display(struct radeon_device *rdev, videomode_t *mode);
5078 serge 86
int init_display_kms(struct drm_device *dev, videomode_t *usermode);
1117 serge 87
 
5271 serge 88
int get_modes(videomode_t *mode, u32 *count);
1404 serge 89
int set_user_mode(videomode_t *mode);
1428 serge 90
int r100_2D_test(struct radeon_device *rdev);
1239 serge 91
 
1404 serge 92
 
1233 serge 93
 /* Legacy VGA regions */
94
#define VGA_RSRC_NONE          0x00
95
#define VGA_RSRC_LEGACY_IO     0x01
96
#define VGA_RSRC_LEGACY_MEM    0x02
97
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
98
/* Non-legacy access */
99
#define VGA_RSRC_NORMAL_IO     0x04
100
#define VGA_RSRC_NORMAL_MEM    0x08
101
 
102
 
1963 serge 103
static const char radeon_family_name[][16] = {
104
	"R100",
105
	"RV100",
106
	"RS100",
107
	"RV200",
108
	"RS200",
109
	"R200",
110
	"RV250",
111
	"RS300",
112
	"RV280",
113
	"R300",
114
	"R350",
115
	"RV350",
116
	"RV380",
117
	"R420",
118
	"R423",
119
	"RV410",
120
	"RS400",
121
	"RS480",
122
	"RS600",
123
	"RS690",
124
	"RS740",
125
	"RV515",
126
	"R520",
127
	"RV530",
128
	"RV560",
129
	"RV570",
130
	"R580",
131
	"R600",
132
	"RV610",
133
	"RV630",
134
	"RV670",
135
	"RV620",
136
	"RV635",
137
	"RS780",
138
	"RS880",
139
	"RV770",
140
	"RV730",
141
	"RV710",
142
	"RV740",
143
	"CEDAR",
144
	"REDWOOD",
145
	"JUNIPER",
146
	"CYPRESS",
147
	"HEMLOCK",
148
	"PALM",
1986 serge 149
	"SUMO",
150
	"SUMO2",
1963 serge 151
	"BARTS",
152
	"TURKS",
153
	"CAICOS",
154
	"CAYMAN",
2997 Serge 155
	"ARUBA",
156
	"TAHITI",
157
	"PITCAIRN",
158
	"VERDE",
3764 Serge 159
	"OLAND",
160
	"HAINAN",
5078 serge 161
	"BONAIRE",
162
	"KAVERI",
163
	"KABINI",
164
	"HAWAII",
165
	"MULLINS",
1963 serge 166
	"LAST",
167
};
1233 serge 168
 
5078 serge 169
#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
170
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
171
 
172
struct radeon_px_quirk {
173
	u32 chip_vendor;
174
	u32 chip_device;
175
	u32 subsys_vendor;
176
	u32 subsys_device;
177
	u32 px_quirk_flags;
178
};
179
 
180
static struct radeon_px_quirk radeon_px_quirk_list[] = {
181
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
182
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
183
	 */
184
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
185
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
186
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
187
	 */
188
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
5179 serge 189
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
190
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
191
	 */
192
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
5078 serge 193
	/* macbook pro 8.2 */
194
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
195
	{ 0, 0, 0, 0, 0 },
196
};
197
 
198
bool radeon_is_px(struct drm_device *dev)
199
{
200
	struct radeon_device *rdev = dev->dev_private;
201
 
202
	if (rdev->flags & RADEON_IS_PX)
203
		return true;
204
	return false;
205
}
206
 
207
static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
208
{
209
	struct radeon_px_quirk *p = radeon_px_quirk_list;
210
 
211
	/* Apply PX quirks */
212
	while (p && p->chip_device != 0) {
213
		if (rdev->pdev->vendor == p->chip_vendor &&
214
		    rdev->pdev->device == p->chip_device &&
215
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
216
		    rdev->pdev->subsystem_device == p->subsys_device) {
217
			rdev->px_quirk_flags = p->px_quirk_flags;
218
			break;
219
		}
220
		++p;
221
	}
222
 
223
	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
224
		rdev->flags &= ~RADEON_IS_PX;
225
}
226
 
2997 Serge 227
/**
3764 Serge 228
 * radeon_program_register_sequence - program an array of registers.
229
 *
230
 * @rdev: radeon_device pointer
231
 * @registers: pointer to the register array
232
 * @array_size: size of the register array
233
 *
234
 * Programs an array of registers with AND and OR masks.
235
 * This is a helper for setting golden registers.
236
 */
237
void radeon_program_register_sequence(struct radeon_device *rdev,
238
				      const u32 *registers,
239
				      const u32 array_size)
240
{
241
	u32 tmp, reg, and_mask, or_mask;
242
	int i;
243
 
244
	if (array_size % 3)
245
		return;
246
 
247
	for (i = 0; i < array_size; i += 3) {
248
		reg = registers[i + 0];
249
		and_mask = registers[i + 1];
250
		or_mask = registers[i + 2];
251
 
252
		if (and_mask == 0xffffffff) {
253
			tmp = or_mask;
254
		} else {
255
			tmp = RREG32(reg);
256
			tmp &= ~and_mask;
257
			tmp |= or_mask;
258
		}
259
		WREG32(reg, tmp);
260
	}
261
}
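/*
 * Illustrative sketch (not real golden settings): the register array passed
 * in is a flat list of {reg, and_mask, or_mask} triplets, which is why
 * array_size must be a multiple of 3. An and_mask of 0xffffffff means
 * "write or_mask directly"; otherwise the bits in and_mask are cleared and
 * or_mask is OR'ed in:
 *
 *	static const u32 example_golden_registers[] = {
 *		0x9a10, 0xffffffff, 0x00000000,	// plain write
 *		0x8a14, 0xf000000f, 0x00000007,	// clear masked bits, then set
 *	};
 *	radeon_program_register_sequence(rdev, example_golden_registers,
 *					 ARRAY_SIZE(example_golden_registers));
 */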
262
 
5078 serge 263
void radeon_pci_config_reset(struct radeon_device *rdev)
264
{
265
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
266
}
267
 
3764 Serge 268
/**
2997 Serge 269
 * radeon_surface_init - Clear GPU surface registers.
270
 *
271
 * @rdev: radeon_device pointer
272
 *
273
 * Clear GPU surface registers (r1xx-r5xx).
1117 serge 274
 */
1179 serge 275
void radeon_surface_init(struct radeon_device *rdev)
1117 serge 276
{
6104 serge 277
	/* FIXME: check this out */
278
	if (rdev->family < CHIP_R600) {
279
		int i;
1117 serge 280
 
1321 serge 281
		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
5078 serge 282
			if (rdev->surface_regs[i].bo)
283
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
284
			else
6104 serge 285
				radeon_clear_surface_reg(rdev, i);
286
		}
1179 serge 287
		/* enable surfaces */
288
		WREG32(RADEON_SURFACE_CNTL, 0);
6104 serge 289
	}
1117 serge 290
}
291
 
292
/*
293
 * GPU scratch register helper functions.
294
 */
2997 Serge 295
/**
296
 * radeon_scratch_init - Init scratch register driver information.
297
 *
298
 * @rdev: radeon_device pointer
299
 *
300
 * Init CP scratch register driver information (r1xx-r5xx)
301
 */
1179 serge 302
void radeon_scratch_init(struct radeon_device *rdev)
1117 serge 303
{
6104 serge 304
	int i;
1117 serge 305
 
6104 serge 306
	/* FIXME: check this out */
307
	if (rdev->family < CHIP_R300) {
308
		rdev->scratch.num_reg = 5;
309
	} else {
310
		rdev->scratch.num_reg = 7;
311
	}
1963 serge 312
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
6104 serge 313
	for (i = 0; i < rdev->scratch.num_reg; i++) {
314
		rdev->scratch.free[i] = true;
1963 serge 315
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
6104 serge 316
	}
1117 serge 317
}
318
 
2997 Serge 319
/**
320
 * radeon_scratch_get - Allocate a scratch register
321
 *
322
 * @rdev: radeon_device pointer
323
 * @reg: scratch register mmio offset
324
 *
325
 * Allocate a CP scratch register for use by the driver (all asics).
326
 * Returns 0 on success or -EINVAL on failure.
327
 */
1117 serge 328
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
329
{
330
	int i;
331
 
332
	for (i = 0; i < rdev->scratch.num_reg; i++) {
333
		if (rdev->scratch.free[i]) {
334
			rdev->scratch.free[i] = false;
335
			*reg = rdev->scratch.reg[i];
336
			return 0;
337
		}
338
	}
339
	return -EINVAL;
340
}
341
 
2997 Serge 342
/**
343
 * radeon_scratch_free - Free a scratch register
344
 *
345
 * @rdev: radeon_device pointer
346
 * @reg: scratch register mmio offset
347
 *
348
 * Free a CP scratch register allocated for use by the driver (all asics)
349
 */
1117 serge 350
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
351
{
352
	int i;
353
 
354
	for (i = 0; i < rdev->scratch.num_reg; i++) {
355
		if (rdev->scratch.reg[i] == reg) {
356
			rdev->scratch.free[i] = true;
357
			return;
358
		}
359
	}
360
}
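/*
 * Usage sketch (illustrative): callers pair radeon_scratch_get() and
 * radeon_scratch_free() around plain WREG32()/RREG32() accesses of the
 * returned register offset; the CP ring tests in r100.c/r600.c follow this
 * pattern with their 0xCAFEDEAD/0xDEADBEEF markers:
 *
 *	uint32_t scratch;
 *	if (radeon_scratch_get(rdev, &scratch) == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);	// seed a known value
 *		// ... submit work, then poll RREG32(scratch) ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */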
361
 
2997 Serge 362
/*
5078 serge 363
 * GPU doorbell aperture helper functions.
364
 */
365
/**
366
 * radeon_doorbell_init - Init doorbell driver information.
367
 *
368
 * @rdev: radeon_device pointer
369
 *
370
 * Init doorbell driver information (CIK)
371
 * Returns 0 on success, error on failure.
372
 */
373
static int radeon_doorbell_init(struct radeon_device *rdev)
374
{
375
	/* doorbell bar mapping */
376
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
377
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
378
 
379
	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
380
	if (rdev->doorbell.num_doorbells == 0)
381
		return -EINVAL;
382
 
383
	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
384
	if (rdev->doorbell.ptr == NULL) {
385
		return -ENOMEM;
386
	}
387
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
388
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
389
 
390
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
391
 
392
	return 0;
393
}
394
 
395
/**
396
 * radeon_doorbell_fini - Tear down doorbell driver information.
397
 *
398
 * @rdev: radeon_device pointer
399
 *
400
 * Tear down doorbell driver information (CIK)
401
 */
402
static void radeon_doorbell_fini(struct radeon_device *rdev)
403
{
404
	iounmap(rdev->doorbell.ptr);
405
	rdev->doorbell.ptr = NULL;
406
}
407
 
408
/**
409
 * radeon_doorbell_get - Allocate a doorbell entry
410
 *
411
 * @rdev: radeon_device pointer
412
 * @doorbell: doorbell index
413
 *
414
 * Allocate a doorbell for use by the driver (all asics).
415
 * Returns 0 on success or -EINVAL on failure.
416
 */
417
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
418
{
419
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
420
	if (offset < rdev->doorbell.num_doorbells) {
421
		__set_bit(offset, rdev->doorbell.used);
422
		*doorbell = offset;
423
		return 0;
424
	} else {
425
		return -EINVAL;
426
	}
427
}
428
 
429
/**
430
 * radeon_doorbell_free - Free a doorbell entry
431
 *
432
 * @rdev: radeon_device pointer
433
 * @doorbell: doorbell index
434
 *
435
 * Free a doorbell allocated for use by the driver (all asics)
436
 */
437
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
438
{
439
	if (doorbell < rdev->doorbell.num_doorbells)
440
		__clear_bit(doorbell, rdev->doorbell.used);
441
}
442
 
5271 serge 443
/**
444
 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
445
 *                                setup KFD
446
 *
447
 * @rdev: radeon_device pointer
448
 * @aperture_base: output returning doorbell aperture base physical address
449
 * @aperture_size: output returning doorbell aperture size in bytes
450
 * @start_offset: output returning # of doorbell bytes reserved for radeon.
451
 *
452
 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
453
 * takes doorbells required for its own rings and reports the setup to KFD.
454
 * Radeon reserved doorbells are at the start of the doorbell aperture.
455
 */
456
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
457
				  phys_addr_t *aperture_base,
458
				  size_t *aperture_size,
459
				  size_t *start_offset)
460
{
461
	/* The first num_doorbells are used by radeon.
462
	 * KFD takes whatever's left in the aperture. */
463
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
464
		*aperture_base = rdev->doorbell.base;
465
		*aperture_size = rdev->doorbell.size;
466
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
467
	} else {
468
		*aperture_base = 0;
469
		*aperture_size = 0;
470
		*start_offset = 0;
471
	}
472
}
473
 
5078 serge 474
/*
2997 Serge 475
 * radeon_wb_*()
476
 * Writeback is the method by which the GPU updates special pages
477
 * in memory with the status of certain GPU events (fences, ring pointers,
478
 * etc.).
479
 */
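/*
 * Concretely (illustrative), each consumer owns a fixed offset inside the
 * single writeback page: e.g. when rdev->wb.enabled is set, a ring's read
 * pointer is fetched as rdev->wb.wb[ring->rptr_offs/4] instead of doing an
 * MMIO read, and fence values are read from their own offsets the same way.
 */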
480
 
481
/**
482
 * radeon_wb_disable - Disable Writeback
483
 *
484
 * @rdev: radeon_device pointer
485
 *
486
 * Disables Writeback (all asics).  Used for suspend.
487
 */
2004 serge 488
void radeon_wb_disable(struct radeon_device *rdev)
489
{
490
	rdev->wb.enabled = false;
491
}
492
 
2997 Serge 493
/**
494
 * radeon_wb_fini - Disable Writeback and free memory
495
 *
496
 * @rdev: radeon_device pointer
497
 *
498
 * Disables Writeback and frees the Writeback memory (all asics).
499
 * Used at driver shutdown.
500
 */
2004 serge 501
void radeon_wb_fini(struct radeon_device *rdev)
502
{
503
	radeon_wb_disable(rdev);
504
	if (rdev->wb.wb_obj) {
5078 serge 505
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
506
			radeon_bo_kunmap(rdev->wb.wb_obj);
507
			radeon_bo_unpin(rdev->wb.wb_obj);
508
			radeon_bo_unreserve(rdev->wb.wb_obj);
509
		}
2004 serge 510
		radeon_bo_unref(&rdev->wb.wb_obj);
511
		rdev->wb.wb = NULL;
512
		rdev->wb.wb_obj = NULL;
513
	}
514
}
515
 
2997 Serge 516
/**
517
 * radeon_wb_init - Init Writeback driver info and allocate memory
518
 *
519
 * @rdev: radeon_device pointer
520
 *
521
 * Initializes writeback and allocates the writeback memory (all asics).
522
 * Used at driver startup.
523
 * Returns 0 on success or an -error on failure.
524
 */
2004 serge 525
int radeon_wb_init(struct radeon_device *rdev)
526
{
527
	int r;
528
 
529
	if (rdev->wb.wb_obj == NULL) {
530
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
5271 serge 531
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
5078 serge 532
				     &rdev->wb.wb_obj);
2004 serge 533
		if (r) {
534
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
535
			return r;
536
		}
6104 serge 537
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
538
		if (unlikely(r != 0)) {
539
			radeon_wb_fini(rdev);
540
			return r;
541
		}
542
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
543
				&rdev->wb.gpu_addr);
544
		if (r) {
545
			radeon_bo_unreserve(rdev->wb.wb_obj);
546
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
547
			radeon_wb_fini(rdev);
548
			return r;
549
		}
550
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
2004 serge 551
		radeon_bo_unreserve(rdev->wb.wb_obj);
6104 serge 552
		if (r) {
553
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
554
			radeon_wb_fini(rdev);
555
			return r;
556
		}
2004 serge 557
	}
558
 
559
	/* clear wb memory */
560
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
561
	/* disable event_write fences */
562
	rdev->wb.use_event = false;
563
	/* disabled via module param */
2997 Serge 564
	if (radeon_no_wb == 1) {
2004 serge 565
		rdev->wb.enabled = false;
2997 Serge 566
	} else {
567
		if (rdev->flags & RADEON_IS_AGP) {
6104 serge 568
			/* often unreliable on AGP */
2997 Serge 569
			rdev->wb.enabled = false;
570
		} else if (rdev->family < CHIP_R300) {
571
			/* often unreliable on pre-r300 */
572
			rdev->wb.enabled = false;
573
		} else {
2004 serge 574
			rdev->wb.enabled = true;
575
			/* event_write fences are only available on r600+ */
2997 Serge 576
			if (rdev->family >= CHIP_R600) {
2004 serge 577
				rdev->wb.use_event = true;
6104 serge 578
			}
2997 Serge 579
		}
580
	}
581
	/* always use writeback/events on NI, APUs */
582
	if (rdev->family >= CHIP_PALM) {
2004 serge 583
		rdev->wb.enabled = true;
584
		rdev->wb.use_event = true;
585
	}
586
 
587
	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
588
 
589
	return 0;
590
}
591
 
1430 serge 592
/**
593
 * radeon_vram_location - try to find VRAM location
594
 * @rdev: radeon device structure holding all necessary information
595
 * @mc: memory controller structure holding memory information
596
 * @base: base address at which to put VRAM
597
 *
598
 * Function will try to place VRAM at the base address provided
599
 * as parameter (which is so far either PCI aperture address or
600
 * for IGP TOM base address).
601
 *
602
 * If there is not enough space to fit the invisible VRAM in the 32-bit
603
 * address space then we limit the VRAM size to the aperture.
604
 *
605
 * If we are using AGP and if the AGP aperture doesn't allow us to have
606
 * room for all the VRAM, then we restrict the VRAM to the PCI aperture
607
 * size and print a warning.
608
 *
609
 * This function never fails; the worst case is limiting VRAM.
610
 *
611
 * Note: GTT start, end, size should be initialized before calling this
612
 * function on AGP platform.
613
 *
1963 serge 614
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
1430 serge 615
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
616
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
617
 * not IGP.
618
 *
619
 * Note: we use mc_vram_size because on some boards we need to program the MC to
620
 * cover the whole aperture even if the VRAM size is smaller than the aperture size
621
 * (Novell bug 204882, along with lots of Ubuntu ones).
622
 *
623
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
624
 * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e.
625
 * not affected by the bogus hardware of Novell bug 204882 and the related Ubuntu
626
 * ones)
627
 *
628
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
629
 * explicitly check for that, though.
630
 *
631
 * FIXME: when reducing VRAM size align new size on power of 2.
1117 serge 632
 */
1430 serge 633
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
1117 serge 634
{
2997 Serge 635
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
636
 
1430 serge 637
	mc->vram_start = base;
3764 Serge 638
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
1430 serge 639
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
640
		mc->real_vram_size = mc->aper_size;
641
		mc->mc_vram_size = mc->aper_size;
642
	}
643
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1963 serge 644
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
1430 serge 645
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
646
		mc->real_vram_size = mc->aper_size;
647
		mc->mc_vram_size = mc->aper_size;
6104 serge 648
	}
1430 serge 649
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2997 Serge 650
	if (limit && limit < mc->real_vram_size)
651
		mc->real_vram_size = limit;
1963 serge 652
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
1430 serge 653
			mc->mc_vram_size >> 20, mc->vram_start,
654
			mc->vram_end, mc->real_vram_size >> 20);
655
}
1117 serge 656
 
1430 serge 657
/**
658
 * radeon_gtt_location - try to find GTT location
659
 * @rdev: radeon device structure holding all necessary information
660
 * @mc: memory controller structure holding memory information
661
 *
662
 * Function will try to place GTT before or after VRAM.
663
 *
664
 * If the GTT size is bigger than the space left, then we adjust the GTT size.
665
 * Thus this function never fails.
666
 *
667
 * FIXME: when reducing GTT size align new size on power of 2.
668
 */
669
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
670
{
671
	u64 size_af, size_bf;
672
 
3764 Serge 673
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
1963 serge 674
	size_bf = mc->vram_start & ~mc->gtt_base_align;
1430 serge 675
	if (size_bf > size_af) {
676
		if (mc->gtt_size > size_bf) {
677
			dev_warn(rdev->dev, "limiting GTT\n");
678
			mc->gtt_size = size_bf;
1117 serge 679
		}
1963 serge 680
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
1430 serge 681
	} else {
682
		if (mc->gtt_size > size_af) {
683
			dev_warn(rdev->dev, "limiting GTT\n");
684
			mc->gtt_size = size_af;
1117 serge 685
		}
1963 serge 686
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
1117 serge 687
	}
1430 serge 688
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
1963 serge 689
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
1430 serge 690
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
1117 serge 691
}
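/*
 * Worked example (illustrative numbers): with VRAM placed at 0 and
 * mc_vram_size = 256MB, size_bf (space below VRAM) is 0 while size_af
 * (space above it) is nearly the whole MC address space, so a 512MB GTT is
 * placed after VRAM: gtt_start = 0x10000000 (rounded up by gtt_base_align)
 * and gtt_end = 0x2fffffff.
 */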
692
 
693
/*
694
 * GPU helpers function.
695
 */
2997 Serge 696
/**
697
 * radeon_card_posted - check if the hw has already been initialized
698
 *
699
 * @rdev: radeon_device pointer
700
 *
701
 * Check if the asic has been initialized (all asics).
702
 * Used at driver startup.
703
 * Returns true if initialized or false if not.
704
 */
1179 serge 705
bool radeon_card_posted(struct radeon_device *rdev)
1117 serge 706
{
707
	uint32_t reg;
708
 
3764 Serge 709
	if (ASIC_IS_NODCE(rdev))
710
		goto check_memsize;
711
 
1117 serge 712
	/* first check CRTCs */
3764 Serge 713
	if (ASIC_IS_DCE4(rdev)) {
1430 serge 714
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
1963 serge 715
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
3764 Serge 716
			if (rdev->num_crtc >= 4) {
717
				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
718
					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
719
			}
720
			if (rdev->num_crtc >= 6) {
721
				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
6104 serge 722
					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
3764 Serge 723
			}
1430 serge 724
		if (reg & EVERGREEN_CRTC_MASTER_EN)
725
			return true;
726
	} else if (ASIC_IS_AVIVO(rdev)) {
1117 serge 727
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
728
		      RREG32(AVIVO_D2CRTC_CONTROL);
729
		if (reg & AVIVO_CRTC_EN) {
730
			return true;
731
		}
732
	} else {
733
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
734
		      RREG32(RADEON_CRTC2_GEN_CNTL);
735
		if (reg & RADEON_CRTC_EN) {
736
			return true;
737
		}
738
	}
739
 
3764 Serge 740
check_memsize:
1117 serge 741
	/* then check MEM_SIZE, in case the crtcs are off */
742
	if (rdev->family >= CHIP_R600)
743
		reg = RREG32(R600_CONFIG_MEMSIZE);
744
	else
745
		reg = RREG32(RADEON_CONFIG_MEMSIZE);
746
 
747
	if (reg)
748
		return true;
749
 
750
	return false;
751
 
752
}
753
 
2997 Serge 754
/**
755
 * radeon_update_bandwidth_info - update display bandwidth params
756
 *
757
 * @rdev: radeon_device pointer
758
 *
759
 * Used when sclk/mclk are switched or display modes are set.
760
 * params are used to calculate display watermarks (all asics)
761
 */
1963 serge 762
void radeon_update_bandwidth_info(struct radeon_device *rdev)
763
{
764
	fixed20_12 a;
765
	u32 sclk = rdev->pm.current_sclk;
766
	u32 mclk = rdev->pm.current_mclk;
767
 
768
	/* sclk/mclk in Mhz */
6104 serge 769
	a.full = dfixed_const(100);
770
	rdev->pm.sclk.full = dfixed_const(sclk);
771
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
772
	rdev->pm.mclk.full = dfixed_const(mclk);
773
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
1963 serge 774
 
775
	if (rdev->flags & RADEON_IS_IGP) {
776
		a.full = dfixed_const(16);
777
		/* core_bandwidth = sclk(Mhz) * 16 */
778
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
779
	}
780
}
781
 
2997 Serge 782
/**
783
 * radeon_boot_test_post_card - check and possibly initialize the hw
784
 *
785
 * @rdev: radeon_device pointer
786
 *
787
 * Check if the asic is initialized and if not, attempt to initialize
788
 * it (all asics).
789
 * Returns true if initialized or false if not.
790
 */
1321 serge 791
bool radeon_boot_test_post_card(struct radeon_device *rdev)
792
{
793
	if (radeon_card_posted(rdev))
794
		return true;
795
 
796
	if (rdev->bios) {
797
		DRM_INFO("GPU not posted. posting now...\n");
798
		if (rdev->is_atom_bios)
799
			atom_asic_init(rdev->mode_info.atom_context);
800
		else
801
			radeon_combios_asic_init(rdev->ddev);
802
		return true;
803
	} else {
804
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
805
		return false;
806
	}
807
}
808
 
2997 Serge 809
/**
810
 * radeon_dummy_page_init - init dummy page used by the driver
811
 *
812
 * @rdev: radeon_device pointer
813
 *
814
 * Allocate the dummy page used by the driver (all asics).
815
 * This dummy page is used by the driver as a filler for gart entries
816
 * when pages are taken out of the GART
817
 * Returns 0 on success, -ENOMEM on failure.
818
 */
1233 serge 819
int radeon_dummy_page_init(struct radeon_device *rdev)
820
{
1430 serge 821
	if (rdev->dummy_page.page)
822
		return 0;
5078 serge 823
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
1233 serge 824
	if (rdev->dummy_page.page == NULL)
825
		return -ENOMEM;
5078 serge 826
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
827
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
6661 serge 828
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
829
							    RADEON_GART_PAGE_DUMMY);
1233 serge 830
	return 0;
831
}
1117 serge 832
 
2997 Serge 833
/**
834
 * radeon_dummy_page_fini - free dummy page used by the driver
835
 *
836
 * @rdev: radeon_device pointer
837
 *
838
 * Frees the dummy page used by the driver (all asics).
839
 */
1233 serge 840
void radeon_dummy_page_fini(struct radeon_device *rdev)
841
{
842
	if (rdev->dummy_page.page == NULL)
843
		return;
5078 serge 844
 
1233 serge 845
	rdev->dummy_page.page = NULL;
846
}
847
 
848
 
1117 serge 849
/* ATOM accessor methods */
2997 Serge 850
/*
851
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
852
 * driver registers callbacks to access registers and the interpreter
853
 * in the driver parses the tables and executes them to program specific
854
 * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
855
 * atombios.h, and atom.c
856
 */
857
 
858
/**
859
 * cail_pll_read - read PLL register
860
 *
861
 * @info: atom card_info pointer
862
 * @reg: PLL register offset
863
 *
864
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
865
 * Returns the value of the PLL register.
866
 */
1117 serge 867
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
868
{
6104 serge 869
	struct radeon_device *rdev = info->dev->dev_private;
870
	uint32_t r;
1117 serge 871
 
6104 serge 872
	r = rdev->pll_rreg(rdev, reg);
873
	return r;
1117 serge 874
}
875
 
2997 Serge 876
/**
877
 * cail_pll_write - write PLL register
878
 *
879
 * @info: atom card_info pointer
880
 * @reg: PLL register offset
881
 * @val: value to write to the pll register
882
 *
883
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
884
 */
1117 serge 885
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
886
{
6104 serge 887
	struct radeon_device *rdev = info->dev->dev_private;
1117 serge 888
 
6104 serge 889
	rdev->pll_wreg(rdev, reg, val);
1117 serge 890
}
891
 
2997 Serge 892
/**
893
 * cail_mc_read - read MC (Memory Controller) register
894
 *
895
 * @info: atom card_info pointer
896
 * @reg: MC register offset
897
 *
898
 * Provides an MC register accessor for the atom interpreter (r4xx+).
899
 * Returns the value of the MC register.
900
 */
1117 serge 901
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
902
{
6104 serge 903
	struct radeon_device *rdev = info->dev->dev_private;
904
	uint32_t r;
1117 serge 905
 
6104 serge 906
	r = rdev->mc_rreg(rdev, reg);
907
	return r;
1117 serge 908
}
909
 
2997 Serge 910
/**
911
 * cail_mc_write - write MC (Memory Controller) register
912
 *
913
 * @info: atom card_info pointer
914
 * @reg: MC register offset
915
 * @val: value to write to the MC register
916
 *
917
 * Provides an MC register accessor for the atom interpreter (r4xx+).
918
 */
1117 serge 919
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
920
{
6104 serge 921
	struct radeon_device *rdev = info->dev->dev_private;
1117 serge 922
 
6104 serge 923
	rdev->mc_wreg(rdev, reg, val);
1117 serge 924
}
925
 
2997 Serge 926
/**
927
 * cail_reg_write - write MMIO register
928
 *
929
 * @info: atom card_info pointer
930
 * @reg: MMIO register offset
931
 * @val: value to write to the MMIO register
932
 *
933
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
934
 */
1117 serge 935
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
936
{
6104 serge 937
	struct radeon_device *rdev = info->dev->dev_private;
1117 serge 938
 
6104 serge 939
	WREG32(reg*4, val);
1117 serge 940
}
941
 
2997 Serge 942
/**
943
 * cail_reg_read - read MMIO register
944
 *
945
 * @info: atom card_info pointer
946
 * @reg: MMIO register offset
947
 *
948
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
949
 * Returns the value of the MMIO register.
950
 */
1117 serge 951
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
952
{
6104 serge 953
	struct radeon_device *rdev = info->dev->dev_private;
954
	uint32_t r;
1117 serge 955
 
6104 serge 956
	r = RREG32(reg*4);
957
	return r;
1117 serge 958
}
959
 
2997 Serge 960
/**
961
 * cail_ioreg_write - write IO register
962
 *
963
 * @info: atom card_info pointer
964
 * @reg: IO register offset
965
 * @val: value to write to the IO register
966
 *
967
 * Provides an IO register accessor for the atom interpreter (r4xx+).
968
 */
1963 serge 969
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
970
{
971
	struct radeon_device *rdev = info->dev->dev_private;
972
 
973
	WREG32_IO(reg*4, val);
974
}
975
 
2997 Serge 976
/**
977
 * cail_ioreg_read - read IO register
978
 *
979
 * @info: atom card_info pointer
980
 * @reg: IO register offset
981
 *
982
 * Provides an IO register accessor for the atom interpreter (r4xx+).
983
 * Returns the value of the IO register.
984
 */
1963 serge 985
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
986
{
987
	struct radeon_device *rdev = info->dev->dev_private;
988
	uint32_t r;
989
 
990
	r = RREG32_IO(reg*4);
991
	return r;
992
}
993
 
2997 Serge 994
/**
995
 * radeon_atombios_init - init the driver info and callbacks for atombios
996
 *
997
 * @rdev: radeon_device pointer
998
 *
999
 * Initializes the driver info and register access callbacks for the
1000
 * ATOM interpreter (r4xx+).
1001
 * Returns 0 on success, -ENOMEM on failure.
1002
 * Called at driver startup.
1003
 */
1117 serge 1004
int radeon_atombios_init(struct radeon_device *rdev)
1005
{
1268 serge 1006
	struct card_info *atom_card_info =
1007
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
1117 serge 1008
 
1268 serge 1009
	if (!atom_card_info)
1010
		return -ENOMEM;
1011
 
1012
	rdev->mode_info.atom_card_info = atom_card_info;
1013
	atom_card_info->dev = rdev->ddev;
1014
	atom_card_info->reg_read = cail_reg_read;
1015
	atom_card_info->reg_write = cail_reg_write;
1963 serge 1016
	/* needed for iio ops */
1017
	if (rdev->rio_mem) {
1018
		atom_card_info->ioreg_read = cail_ioreg_read;
1019
		atom_card_info->ioreg_write = cail_ioreg_write;
1020
	} else {
1021
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
1022
		atom_card_info->ioreg_read = cail_reg_read;
1023
		atom_card_info->ioreg_write = cail_reg_write;
1024
	}
1268 serge 1025
	atom_card_info->mc_read = cail_mc_read;
1026
	atom_card_info->mc_write = cail_mc_write;
1027
	atom_card_info->pll_read = cail_pll_read;
1028
	atom_card_info->pll_write = cail_pll_write;
1029
 
1030
	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
3764 Serge 1031
	if (!rdev->mode_info.atom_context) {
1032
		radeon_atombios_fini(rdev);
1033
		return -ENOMEM;
1034
	}
1035
 
1630 serge 1036
	mutex_init(&rdev->mode_info.atom_context->mutex);
5271 serge 1037
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
6104 serge 1038
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
1321 serge 1039
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
6104 serge 1040
	return 0;
1117 serge 1041
}
1042
 
2997 Serge 1043
/**
1044
 * radeon_atombios_fini - free the driver info and callbacks for atombios
1045
 *
1046
 * @rdev: radeon_device pointer
1047
 *
1048
 * Frees the driver info and register access callbacks for the ATOM
1049
 * interpreter (r4xx+).
1050
 * Called at driver shutdown.
1051
 */
1117 serge 1052
void radeon_atombios_fini(struct radeon_device *rdev)
1053
{
1321 serge 1054
	if (rdev->mode_info.atom_context) {
1055
		kfree(rdev->mode_info.atom_context->scratch);
3764 Serge 1056
	}
1119 serge 1057
	kfree(rdev->mode_info.atom_context);
3764 Serge 1058
	rdev->mode_info.atom_context = NULL;
1268 serge 1059
	kfree(rdev->mode_info.atom_card_info);
3764 Serge 1060
	rdev->mode_info.atom_card_info = NULL;
1117 serge 1061
}
1062
 
2997 Serge 1063
/* COMBIOS */
1064
/*
1065
 * COMBIOS is the bios format prior to ATOM. It provides
1066
 * command tables similar to ATOM, but doesn't have a unified
1067
 * parser.  See radeon_combios.c
1068
 */
1069
 
1070
/**
1071
 * radeon_combios_init - init the driver info for combios
1072
 *
1073
 * @rdev: radeon_device pointer
1074
 *
1075
 * Initializes the driver info for combios (r1xx-r3xx).
1076
 * Returns 0 on success.
1077
 * Called at driver startup.
1078
 */
1117 serge 1079
int radeon_combios_init(struct radeon_device *rdev)
1080
{
1128 serge 1081
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1117 serge 1082
	return 0;
1083
}
1084
 
2997 Serge 1085
/**
1086
 * radeon_combios_fini - free the driver info for combios
1087
 *
1088
 * @rdev: radeon_device pointer
1089
 *
1090
 * Frees the driver info for combios (r1xx-r3xx).
1091
 * Called at driver shutdown.
1092
 */
1117 serge 1093
void radeon_combios_fini(struct radeon_device *rdev)
1094
{
1095
}
1096
 
2997 Serge 1097
/* if we get transitioned to only one device, take VGA back */
1098
/**
1099
 * radeon_vga_set_decode - enable/disable vga decode
1100
 *
1101
 * @cookie: radeon_device pointer
1102
 * @state: enable/disable vga decode
1103
 *
1104
 * Enable/disable vga decode (all asics).
1105
 * Returns VGA resource flags.
1106
 */
1233 serge 1107
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1108
{
1109
	struct radeon_device *rdev = cookie;
1110
	radeon_vga_set_state(rdev, state);
1111
	if (state)
1112
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1113
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1114
	else
1115
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1116
}
1117 serge 1117
 
2997 Serge 1118
/**
1119
 * radeon_check_pot_argument - check that argument is a power of two
1120
 *
1121
 * @arg: value to check
1122
 *
1123
 * Validates that a certain argument is a power of two (all asics).
1124
 * Returns true if argument is valid.
1125
 */
1126
static bool radeon_check_pot_argument(int arg)
1404 serge 1127
{
2997 Serge 1128
	return (arg & (arg - 1)) == 0;
1129
}
1130
 
1131
/**
6104 serge 1132
 * Determine a sensible default GART size according to ASIC family.
1133
 *
1134
 * @family: ASIC family name
1135
 */
1136
static int radeon_gart_size_auto(enum radeon_family family)
1137
{
1138
	/* default to a larger gart size on newer asics */
1139
	if (family >= CHIP_TAHITI)
1140
		return 2048;
1141
	else if (family >= CHIP_RV770)
1142
		return 1024;
1143
	else
1144
		return 512;
1145
}
1146
 
1147
/**
2997 Serge 1148
 * radeon_check_arguments - validate module params
1149
 *
1150
 * @rdev: radeon_device pointer
1151
 *
1152
 * Validates certain module parameters and updates
1153
 * the associated values used by the driver (all asics).
1154
 */
1155
static void radeon_check_arguments(struct radeon_device *rdev)
1156
{
1404 serge 1157
	/* vramlimit must be a power of two */
2997 Serge 1158
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
1404 serge 1159
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1160
				radeon_vram_limit);
1161
		radeon_vram_limit = 0;
1162
	}
2997 Serge 1163
 
5078 serge 1164
	if (radeon_gart_size == -1) {
6104 serge 1165
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
5078 serge 1166
	}
1404 serge 1167
	/* gtt size must be a power of two and greater than or equal to 32M */
2997 Serge 1168
	if (radeon_gart_size < 32) {
5078 serge 1169
		dev_warn(rdev->dev, "gart size (%d) too small\n",
1404 serge 1170
				radeon_gart_size);
6104 serge 1171
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
2997 Serge 1172
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
1404 serge 1173
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1174
				radeon_gart_size);
6104 serge 1175
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
1404 serge 1176
	}
2997 Serge 1177
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1178
 
1404 serge 1179
	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
1180
	switch (radeon_agpmode) {
1181
	case -1:
1182
	case 0:
1183
	case 1:
1184
	case 2:
1185
	case 4:
1186
	case 8:
1187
		break;
1188
	default:
1189
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1190
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1191
		radeon_agpmode = 0;
1192
		break;
1193
	}
5078 serge 1194
 
1195
	if (!radeon_check_pot_argument(radeon_vm_size)) {
1196
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1197
			 radeon_vm_size);
1198
		radeon_vm_size = 4;
1199
	}
1200
 
1201
	if (radeon_vm_size < 1) {
6938 serge 1202
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
5078 serge 1203
			 radeon_vm_size);
1204
		radeon_vm_size = 4;
1205
	}
1206
 
7146 serge 1207
	/*
1208
	 * Max GPUVM size for Cayman, SI and CI is 40 bits.
1209
	 */
5078 serge 1210
	if (radeon_vm_size > 1024) {
1211
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1212
			 radeon_vm_size);
1213
		radeon_vm_size = 4;
1214
	}
1215
 
1216
	/* defines number of bits in page table versus page directory,
1217
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1218
	 * page table and the remaining bits are in the page directory */
1219
	if (radeon_vm_block_size == -1) {
1220
 
1221
		/* Total bits covered by PD + PTs */
5179 serge 1222
		unsigned bits = ilog2(radeon_vm_size) + 18;
5078 serge 1223
 
1224
		/* Make sure the PD is 4K in size up to 8GB address space.
1225
		   Above that, split equally between PD and PTs */
1226
		if (radeon_vm_size <= 8)
1227
			radeon_vm_block_size = bits - 9;
1228
		else
1229
			radeon_vm_block_size = (bits + 3) / 2;
1230
 
1231
	} else if (radeon_vm_block_size < 9) {
1232
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1233
			 radeon_vm_block_size);
1234
		radeon_vm_block_size = 9;
1235
	}
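	/*
	 * Worked example (default settings): radeon_vm_size = 8 (GB) gives
	 * bits = ilog2(8) + 18 = 21 page-index bits, so block_size = 21 - 9 = 12;
	 * each page table then covers 12 bits and the page directory keeps the
	 * remaining 9 bits, i.e. 512 entries * 8 bytes = a 4KB PD.
	 */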
1236
 
1237
	if (radeon_vm_block_size > 24 ||
1238
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1239
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1240
			 radeon_vm_block_size);
1241
		radeon_vm_block_size = 9;
1242
	}
1404 serge 1243
}
1244
 
5078 serge 1245
/**
1246
 * radeon_device_init - initialize the driver
1247
 *
1248
 * @rdev: radeon_device pointer
1249
 * @ddev: drm dev pointer
1250
 * @pdev: pci dev pointer
1251
 * @flags: driver flags
1252
 *
1253
 * Initializes the driver info and hw (all asics).
1254
 * Returns 0 for success or an error on failure.
1255
 * Called at driver startup.
1256
 */
1117 serge 1257
int radeon_device_init(struct radeon_device *rdev,
6104 serge 1258
		       struct drm_device *ddev,
1259
		       struct pci_dev *pdev,
1260
		       uint32_t flags)
1117 serge 1261
{
1963 serge 1262
	int r, i;
1179 serge 1263
	int dma_bits;
5078 serge 1264
	bool runtime = false;
1117 serge 1265
 
6104 serge 1266
	rdev->shutdown = false;
5078 serge 1267
	rdev->dev = &pdev->dev;
6104 serge 1268
	rdev->ddev = ddev;
1269
	rdev->pdev = pdev;
1270
	rdev->flags = flags;
1271
	rdev->family = flags & RADEON_FAMILY_MASK;
1272
	rdev->is_atom_bios = false;
1273
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
5078 serge 1274
	rdev->mc.gtt_size = 512 * 1024 * 1024;
1221 serge 1275
	rdev->accel_working = false;
2997 Serge 1276
	/* set up ring ids */
1277
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1278
		rdev->ring[i].idx = i;
1279
	}
5271 serge 1280
	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
1963 serge 1281
 
7146 serge 1282
	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1283
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1284
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1963 serge 1285
 
6104 serge 1286
	/* mutex initialization are all done here so we
1287
	 * can recall function without having locking issues */
2997 Serge 1288
	mutex_init(&rdev->ring_lock);
1630 serge 1289
	mutex_init(&rdev->dc_hw_i2c_mutex);
2997 Serge 1290
	atomic_set(&rdev->ih.lock, 0);
1630 serge 1291
	mutex_init(&rdev->gem.mutex);
1292
	mutex_init(&rdev->pm.mutex);
2997 Serge 1293
	mutex_init(&rdev->gpu_clock_mutex);
5078 serge 1294
	mutex_init(&rdev->srbm_mutex);
5271 serge 1295
	mutex_init(&rdev->grbm_idx_mutex);
5346 serge 1296
	init_rwsem(&rdev->pm.mclk_lock);
1297
	init_rwsem(&rdev->exclusive_lock);
2997 Serge 1298
	init_waitqueue_head(&rdev->irq.vblank_queue);
5271 serge 1299
	mutex_init(&rdev->mn_lock);
1300
//	hash_init(rdev->mn_hash);
2997 Serge 1301
	r = radeon_gem_init(rdev);
1302
	if (r)
1303
		return r;
5078 serge 1304
 
1305
	radeon_check_arguments(rdev);
2997 Serge 1306
	/* Adjust VM size here.
5078 serge 1307
	 * Max GPUVM size for cayman+ is 40 bits.
2997 Serge 1308
	 */
5078 serge 1309
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1117 serge 1310
 
1179 serge 1311
	/* Set asic functions */
1312
	r = radeon_asic_init(rdev);
1404 serge 1313
	if (r)
1179 serge 1314
		return r;
1315
 
1963 serge 1316
	/* all of the newer IGP chips have an internal gart
1317
	 * However some rs4xx report as AGP, so remove that here.
1318
	 */
1319
	if ((rdev->family >= CHIP_RS400) &&
1320
	    (rdev->flags & RADEON_IS_IGP)) {
1321
		rdev->flags &= ~RADEON_IS_AGP;
1322
	}
1323
 
1321 serge 1324
	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1221 serge 1325
		radeon_agp_disable(rdev);
6104 serge 1326
	}
1117 serge 1327
 
3764 Serge 1328
	/* Set the internal MC address mask
1329
	 * This is the max address of the GPU's
1330
	 * internal address space.
1331
	 */
1332
	if (rdev->family >= CHIP_CAYMAN)
1333
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1334
	else if (rdev->family >= CHIP_CEDAR)
1335
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1336
	else
1337
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1338
 
1179 serge 1339
	/* set DMA mask + need_dma32 flags.
1340
	 * PCIE - can handle 40-bits.
2997 Serge 1341
	 * IGP - can handle 40-bits
1179 serge 1342
	 * AGP - generally dma32 is safest
2997 Serge 1343
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1179 serge 1344
	 */
1345
	rdev->need_dma32 = false;
1346
	if (rdev->flags & RADEON_IS_AGP)
1347
		rdev->need_dma32 = true;
2997 Serge 1348
	if ((rdev->flags & RADEON_IS_PCI) &&
1349
	    (rdev->family <= CHIP_RS740))
1179 serge 1350
		rdev->need_dma32 = true;
1117 serge 1351
 
1352
 
6104 serge 1353
	/* Registers mapping */
1354
	/* TODO: block userspace mapping of io register */
3192 Serge 1355
	spin_lock_init(&rdev->mmio_idx_lock);
5078 serge 1356
	spin_lock_init(&rdev->smc_idx_lock);
1357
	spin_lock_init(&rdev->pll_idx_lock);
1358
	spin_lock_init(&rdev->mc_idx_lock);
1359
	spin_lock_init(&rdev->pcie_idx_lock);
1360
	spin_lock_init(&rdev->pciep_idx_lock);
1361
	spin_lock_init(&rdev->pif_idx_lock);
1362
	spin_lock_init(&rdev->cg_idx_lock);
1363
	spin_lock_init(&rdev->uvd_idx_lock);
1364
	spin_lock_init(&rdev->rcu_idx_lock);
1365
	spin_lock_init(&rdev->didt_idx_lock);
1366
	spin_lock_init(&rdev->end_idx_lock);
1367
	if (rdev->family >= CHIP_BONAIRE) {
1368
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1369
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1370
	} else {
6104 serge 1371
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1372
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
5078 serge 1373
	}
2997 Serge 1374
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
6104 serge 1375
	if (rdev->rmmio == NULL) {
1376
		return -ENOMEM;
1377
	}
1378
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1379
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1117 serge 1380
 
5078 serge 1381
	/* doorbell bar mapping */
1382
	if (rdev->family >= CHIP_BONAIRE)
1383
		radeon_doorbell_init(rdev);
1384
 
2997 Serge 1385
	/* io port mapping */
1386
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1387
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1388
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1389
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1390
			break;
1391
		}
1392
	}
1393
	if (rdev->rio_mem == NULL)
1394
		DRM_ERROR("Unable to find PCI I/O BAR\n");
1395
 
5078 serge 1396
	if (rdev->flags & RADEON_IS_PX)
1397
		radeon_device_handle_px_quirks(rdev);
1398
	if (rdev->flags & RADEON_IS_PX)
1399
		runtime = true;
2997 Serge 1400
 
1179 serge 1401
	r = radeon_init(rdev);
1221 serge 1402
	if (r)
6104 serge 1403
		goto failed;
1117 serge 1404
 
3192 Serge 1405
 
5078 serge 1406
 
1221 serge 1407
	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1408
		/* Acceleration not working on AGP card try again
1409
		 * with fallback to PCI or PCIE GART
1410
		 */
1963 serge 1411
		radeon_asic_reset(rdev);
1221 serge 1412
		radeon_fini(rdev);
1413
		radeon_agp_disable(rdev);
1414
		r = radeon_init(rdev);
1415
		if (r)
6104 serge 1416
			goto failed;
1126 serge 1417
	}
5078 serge 1418
 
5271 serge 1419
//   r = radeon_ib_ring_tests(rdev);
1420
//   if (r)
1421
//       DRM_ERROR("ib ring test failed (%d).\n", r);
1422
 
5078 serge 1423
	if ((radeon_testing & 1)) {
1424
		if (rdev->accel_working)
1425
			radeon_test_moves(rdev);
1426
		else
1427
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1428
	}
1429
	if ((radeon_testing & 2)) {
1430
		if (rdev->accel_working)
1431
			radeon_test_syncing(rdev);
1432
		else
1433
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1434
	}
6104 serge 1435
	if (radeon_benchmarking) {
5078 serge 1436
		if (rdev->accel_working)
6104 serge 1437
			radeon_benchmark(rdev, radeon_benchmarking);
5078 serge 1438
		else
1439
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
6104 serge 1440
	}
1179 serge 1441
	return 0;
6104 serge 1442
 
1443
failed:
1444
	return r;
1117 serge 1445
}
1446
 
2997 Serge 1447
/**
1448
 * radeon_gpu_reset - reset the asic
1449
 *
1450
 * @rdev: radeon device pointer
1451
 *
1452
 * Attempt to reset the GPU if it has hung (all asics).
1453
 * Returns 0 for success or an error on failure.
1454
 */
1455
int radeon_gpu_reset(struct radeon_device *rdev)
1456
{
1457
    unsigned ring_sizes[RADEON_NUM_RINGS];
1458
    uint32_t *ring_data[RADEON_NUM_RINGS];
1179 serge 1459
 
2997 Serge 1460
    bool saved = false;
1461
 
1462
    int i, r;
1463
    int resched;
1464
 
5346 serge 1465
	down_write(&rdev->exclusive_lock);
5078 serge 1466
 
5346 serge 1467
	if (!rdev->needs_reset) {
1468
		up_write(&rdev->exclusive_lock);
1469
		return 0;
1470
	}
1471
 
6661 serge 1472
	atomic_inc(&rdev->gpu_reset_counter);
1473
 
2997 Serge 1474
    radeon_save_bios_scratch_regs(rdev);
1475
    /* block TTM */
1476
//    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1477
    radeon_suspend(rdev);
1478
 
1479
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1480
        ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1481
                           &ring_data[i]);
1482
        if (ring_sizes[i]) {
1483
            saved = true;
1484
            dev_info(rdev->dev, "Saved %d dwords of commands "
1485
                 "on ring %d.\n", ring_sizes[i], i);
1486
        }
1487
    }
1488
 
1489
    r = radeon_asic_reset(rdev);
1490
    if (!r) {
1491
        dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1492
        radeon_resume(rdev);
1493
    }
1494
 
1495
    radeon_restore_bios_scratch_regs(rdev);
1496
 
1497
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
5271 serge 1498
        if (!r && ring_data[i]) {
2997 Serge 1499
            radeon_ring_restore(rdev, &rdev->ring[i],
1500
                        ring_sizes[i], ring_data[i]);
1501
        } else {
5271 serge 1502
            radeon_fence_driver_force_completion(rdev, i);
2997 Serge 1503
            kfree(ring_data[i]);
1504
        }
1505
    }
1506
 
1507
//    ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1508
    if (r) {
1509
        /* bad news, how to tell it to userspace ? */
1510
        dev_info(rdev->dev, "GPU reset failed\n");
1511
    }
1512
 
5346 serge 1513
	rdev->needs_reset = r == -EAGAIN;
1514
	rdev->in_reset = false;
1515
 
1516
	up_read(&rdev->exclusive_lock);
2997 Serge 1517
    return r;
1518
}
1519
 
1520
 
1117 serge 1521
/*
1522
 * Driver load/unload
1523
 */
1524
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
1525
{
1526
    struct radeon_device *rdev;
1527
    int r;
1528
 
1529
 
1120 serge 1530
    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
1117 serge 1531
    if (rdev == NULL) {
1532
        return -ENOMEM;
1533
    };
1534
 
1535
    dev->dev_private = (void *)rdev;
1536
 
1537
    /* update BUS flag */
5097 serge 1538
    if (drm_pci_device_is_agp(dev)) {
1117 serge 1539
        flags |= RADEON_IS_AGP;
1239 serge 1540
    } else if (drm_device_is_pcie(dev)) {
1541
        flags |= RADEON_IS_PCIE;
1542
    } else {
1543
        flags |= RADEON_IS_PCI;
1544
    }
1117 serge 1545
 
1182 serge 1546
    /* radeon_device_init should report only fatal error
1547
     * like memory allocation failure or iomapping failure,
1548
     * or memory manager initialization failure; it must
1549
     * properly initialize the GPU MC controller and permit
1550
     * VRAM allocation
1551
     */
1117 serge 1552
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
1553
    if (r) {
1182 serge 1554
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
1117 serge 1555
        return r;
1556
    }
1182 serge 1557
    /* Again modeset_init should fail only on fatal error
1558
     * otherwise it should provide enough functionality
1559
     * for shadowfb to run
1560
     */
5078 serge 1561
    main_device = dev;
1562
 
1246 serge 1563
    if( radeon_modeset )
1564
    {
1268 serge 1565
        r = radeon_modeset_init(rdev);
1566
        if (r) {
1567
            return r;
1568
        }
5078 serge 1569
        init_display_kms(dev, &usermode);
1570
    }
1986 serge 1571
    else
5078 serge 1572
        init_display(rdev, &usermode);
1126 serge 1573
 
1117 serge 1574
    return 0;
5078 serge 1575
}
1117 serge 1576
 
1577
 
1221 serge 1578
 
1117 serge 1579
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
1580
{
1581
    return pci_resource_start(dev->pdev, resource);
1582
}
1583
 
1584
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
1585
{
1586
    return pci_resource_len(dev->pdev, resource);
1587
}
1588
 
1123 serge 1589
 
1590
uint32_t __div64_32(uint64_t *n, uint32_t base)
1591
{
1592
        uint64_t rem = *n;
1593
        uint64_t b = base;
1594
        uint64_t res, d = 1;
1595
        uint32_t high = rem >> 32;
1596
 
1597
        /* Reduce the thing a bit first */
1598
        res = 0;
1599
        if (high >= base) {
1600
                high /= base;
1601
                res = (uint64_t) high << 32;
1602
                rem -= (uint64_t) (high*base) << 32;
1603
        }
1604
 
1605
        while ((int64_t)b > 0 && b < rem) {
1606
                b = b+b;
1607
                d = d+d;
1608
        }
1609
 
1610
        do {
1611
                if (rem >= b) {
1612
                        rem -= b;
1613
                        res += d;
1614
                }
1615
                b >>= 1;
1616
                d >>= 1;
1617
        } while (d);
1618
 
1619
        *n = res;
1620
        return rem;
1621
}
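/*
 * Example (illustrative): __div64_32() is the do_div()-style helper used on
 * 32-bit targets; it divides *n in place and returns the remainder:
 *
 *	uint64_t n = 10000000001ULL;
 *	uint32_t rem = __div64_32(&n, 1000);	// n == 10000000, rem == 1
 */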
1622
 
1239 serge 1623
static struct pci_device_id pciidlist[] = {
1624
    radeon_PCI_IDS
1625
};
1626
 
6104 serge 1627
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
1628
int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
1629
void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
1630
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
1631
				    int *max_error,
1632
				    struct timeval *vblank_time,
1633
				    unsigned flags);
1634
void radeon_gem_object_free(struct drm_gem_object *obj);
5078 serge 1635
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
1636
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
1637
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
1638
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
1239 serge 1639
 
1640
 
5078 serge 1641
static struct drm_driver kms_driver = {
1642
    .driver_features =
1643
        DRIVER_USE_AGP |
1644
        DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
1645
        DRIVER_PRIME | DRIVER_RENDER,
1646
    .load = radeon_driver_load_kms,
1647
//    .open = radeon_driver_open_kms,
1648
//    .preclose = radeon_driver_preclose_kms,
1649
//    .postclose = radeon_driver_postclose_kms,
1650
//    .lastclose = radeon_driver_lastclose_kms,
1651
//    .unload = radeon_driver_unload_kms,
6104 serge 1652
    .get_vblank_counter = radeon_get_vblank_counter_kms,
1653
    .enable_vblank = radeon_enable_vblank_kms,
1654
    .disable_vblank = radeon_disable_vblank_kms,
1655
    .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
1656
    .get_scanout_position = radeon_get_crtc_scanoutpos,
5078 serge 1657
#if defined(CONFIG_DEBUG_FS)
1658
    .debugfs_init = radeon_debugfs_init,
1659
    .debugfs_cleanup = radeon_debugfs_cleanup,
1660
#endif
1661
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
1662
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
1663
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
1664
    .irq_handler = radeon_driver_irq_handler_kms,
1665
//    .ioctls = radeon_ioctls_kms,
6104 serge 1666
    .gem_free_object = radeon_gem_object_free,
5078 serge 1667
//    .gem_open_object = radeon_gem_object_open,
1668
//    .gem_close_object = radeon_gem_object_close,
1669
//    .dumb_create = radeon_mode_dumb_create,
1670
//    .dumb_map_offset = radeon_mode_dumb_mmap,
1671
//    .dumb_destroy = drm_gem_dumb_destroy,
1672
//    .fops = &radeon_driver_kms_fops,
3120 serge 1673
 
5078 serge 1674
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1675
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1676
//    .gem_prime_export = drm_gem_prime_export,
1677
//    .gem_prime_import = drm_gem_prime_import,
1678
//    .gem_prime_pin = radeon_gem_prime_pin,
1679
//    .gem_prime_unpin = radeon_gem_prime_unpin,
1680
//    .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
1681
//    .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
1682
//    .gem_prime_vmap = radeon_gem_prime_vmap,
1683
//    .gem_prime_vunmap = radeon_gem_prime_vunmap,
1239 serge 1684
 
5078 serge 1685
};
2007 serge 1686
 
5078 serge 1687
int ati_init(void)
1239 serge 1688
{
5078 serge 1689
    static pci_dev_t device;
2997 Serge 1690
    const struct pci_device_id  *ent;
5078 serge 1691
    int  err;
1239 serge 1692
 
1693
    ent = find_pci_device(&device, pciidlist);
1694
    if( unlikely(ent == NULL) )
1695
    {
1696
        dbgprintf("device not found\n");
5078 serge 1697
        return -ENODEV;
1239 serge 1698
    };
1699
 
5078 serge 1700
    drm_core_init();
1701
 
1702
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
1239 serge 1703
                                device.pci_dev.device);
1704
 
5078 serge 1705
    kms_driver.driver_features |= DRIVER_MODESET;
3764 Serge 1706
 
5078 serge 1707
    err = drm_get_pci_dev(&device.pci_dev, ent, &kms_driver);
1239 serge 1708
 
1246 serge 1709
    return err;
5078 serge 1710
}
1430 serge 1711