Subversion Repositories Kolibri OS

Rev

Rev 5346 | Rev 6661 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
1117 serge 1
/*
2
 * Copyright 2008 Advanced Micro Devices, Inc.
3
 * Copyright 2008 Red Hat Inc.
4
 * Copyright 2009 Jerome Glisse.
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * copy of this software and associated documentation files (the "Software"),
8
 * to deal in the Software without restriction, including without limitation
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10
 * and/or sell copies of the Software, and to permit persons to whom the
11
 * Software is furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
 * OTHER DEALINGS IN THE SOFTWARE.
23
 *
24
 * Authors: Dave Airlie
25
 *          Alex Deucher
26
 *          Jerome Glisse
27
 */
28
//#include 
2997 Serge 29
#include 
1179 serge 30
#include 
31
#include 
1221 serge 32
#include 
6104 serge 33
#include 
1117 serge 34
#include "radeon_reg.h"
35
#include "radeon.h"
36
#include "atom.h"
3120 serge 37
 
38
#include "bitmap.h"
1428 serge 39
#include "display.h"
1117 serge 40
 
3120 serge 41
 
1221 serge 42
#include 
43
 
5078 serge 44
#define PCI_VENDOR_ID_ATI               0x1002
#define PCI_VENDOR_ID_APPLE             0x106b

/* Driver tuning knobs (counterparts of the Linux driver's module parameters).
 * By convention here, -1 means "auto-select a default at init time". */
int radeon_no_wb;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;        /* cap reported VRAM, in MiB (0 = no cap) */
int radeon_gart_size = -1; /* auto */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_audio = -1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
int radeon_fastfb = 0;
int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
int radeon_vm_size = 8;
int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
int irq_override = 0;
int radeon_bapm = -1;
int radeon_backlight = 0;
int radeon_auxch = -1;
int radeon_mst = 0;
78
 
5078 serge 79
extern display_t *os_display;
80
extern struct drm_device *main_device;
81
extern videomode_t usermode;
1246 serge 82
 
3120 serge 83
 
1404 serge 84
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
85
int init_display(struct radeon_device *rdev, videomode_t *mode);
5078 serge 86
int init_display_kms(struct drm_device *dev, videomode_t *usermode);
1117 serge 87
 
5271 serge 88
int get_modes(videomode_t *mode, u32 *count);
1404 serge 89
int set_user_mode(videomode_t *mode);
1428 serge 90
int r100_2D_test(struct radeon_device *rdev);
1239 serge 91
 
1404 serge 92
 
1233 serge 93
/* VGA arbiter resource flags — presumably mirroring Linux vgaarb.h; verify. */
 /* Legacy VGA regions */
#define VGA_RSRC_NONE          0x00
#define VGA_RSRC_LEGACY_IO     0x01
#define VGA_RSRC_LEGACY_MEM    0x02
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO     0x04
#define VGA_RSRC_NORMAL_MEM    0x08
101
 
102
 
1963 serge 103
/* Human-readable ASIC family names used in driver log messages.
 * Presumably indexed in radeon_family enum order — verify against
 * radeon_family.h before relying on the indices. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
1233 serge 168
 
5078 serge 169
/* PowerXpress (PX) quirk flags. */
#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)

/* Identifies one PCI device (chip + subsystem IDs) that needs PX quirks. */
struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

/* Known PX-broken systems; the table is terminated by an all-zero entry. */
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* macbook pro 8.2 */
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
	{ 0, 0, 0, 0, 0 },
};
197
 
198
bool radeon_is_px(struct drm_device *dev)
199
{
200
	struct radeon_device *rdev = dev->dev_private;
201
 
202
	if (rdev->flags & RADEON_IS_PX)
203
		return true;
204
	return false;
205
}
206
 
207
static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
208
{
209
	struct radeon_px_quirk *p = radeon_px_quirk_list;
210
 
211
	/* Apply PX quirks */
212
	while (p && p->chip_device != 0) {
213
		if (rdev->pdev->vendor == p->chip_vendor &&
214
		    rdev->pdev->device == p->chip_device &&
215
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
216
		    rdev->pdev->subsystem_device == p->subsys_device) {
217
			rdev->px_quirk_flags = p->px_quirk_flags;
218
			break;
219
		}
220
		++p;
221
	}
222
 
223
	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
224
		rdev->flags &= ~RADEON_IS_PX;
225
}
226
 
2997 Serge 227
/**
3764 Serge 228
 * radeon_program_register_sequence - program an array of registers.
229
 *
230
 * @rdev: radeon_device pointer
231
 * @registers: pointer to the register array
232
 * @array_size: size of the register array
233
 *
234
 * Programs an array of registers with AND and OR masks.
235
 * This is a helper for setting golden registers.
236
 */
237
void radeon_program_register_sequence(struct radeon_device *rdev,
238
				      const u32 *registers,
239
				      const u32 array_size)
240
{
241
	u32 tmp, reg, and_mask, or_mask;
242
	int i;
243
 
244
	if (array_size % 3)
245
		return;
246
 
247
	for (i = 0; i < array_size; i +=3) {
248
		reg = registers[i + 0];
249
		and_mask = registers[i + 1];
250
		or_mask = registers[i + 2];
251
 
252
		if (and_mask == 0xffffffff) {
253
			tmp = or_mask;
254
		} else {
255
			tmp = RREG32(reg);
256
			tmp &= ~and_mask;
257
			tmp |= or_mask;
258
		}
259
		WREG32(reg, tmp);
260
	}
261
}
262
 
5078 serge 263
/* Trigger an ASIC reset by writing the reset magic to PCI config offset 0x7c. */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
267
 
3764 Serge 268
/**
2997 Serge 269
 * radeon_surface_init - Clear GPU surface registers.
270
 *
271
 * @rdev: radeon_device pointer
272
 *
273
 * Clear GPU surface registers (r1xx-r5xx).
1117 serge 274
 */
1179 serge 275
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	/* Surface registers only exist on pre-R600 (r1xx-r5xx) parts. */
	if (rdev->family < CHIP_R600) {
		int i;

		/* Re-claim registers that are owned by a BO; clear the rest. */
		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}
291
 
292
/*
293
 * GPU scratch registers helpers function.
294
 */
2997 Serge 295
/**
296
 * radeon_scratch_init - Init scratch register driver information.
297
 *
298
 * @rdev: radeon_device pointer
299
 *
300
 * Init CP scratch register driver information (r1xx-r5xx)
301
 */
1179 serge 302
void radeon_scratch_init(struct radeon_device *rdev)
1117 serge 303
{
6104 serge 304
	int i;
1117 serge 305
 
6104 serge 306
	/* FIXME: check this out */
307
	if (rdev->family < CHIP_R300) {
308
		rdev->scratch.num_reg = 5;
309
	} else {
310
		rdev->scratch.num_reg = 7;
311
	}
1963 serge 312
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
6104 serge 313
	for (i = 0; i < rdev->scratch.num_reg; i++) {
314
		rdev->scratch.free[i] = true;
1963 serge 315
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
6104 serge 316
	}
1117 serge 317
}
318
 
2997 Serge 319
/**
320
 * radeon_scratch_get - Allocate a scratch register
321
 *
322
 * @rdev: radeon_device pointer
323
 * @reg: scratch register mmio offset
324
 *
325
 * Allocate a CP scratch register for use by the driver (all asics).
326
 * Returns 0 on success or -EINVAL on failure.
327
 */
1117 serge 328
/* Allocate a free CP scratch register; stores its MMIO offset in *reg.
 * Returns 0 on success or -EINVAL when all registers are taken. */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (!rdev->scratch.free[i])
			continue;
		rdev->scratch.free[i] = false;
		*reg = rdev->scratch.reg[i];
		return 0;
	}
	return -EINVAL;
}
341
 
2997 Serge 342
/**
343
 * radeon_scratch_free - Free a scratch register
344
 *
345
 * @rdev: radeon_device pointer
346
 * @reg: scratch register mmio offset
347
 *
348
 * Free a CP scratch register allocated for use by the driver (all asics)
349
 */
1117 serge 350
/* Return a previously allocated CP scratch register (identified by its MMIO
 * offset) to the free pool. Unknown offsets are silently ignored. */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] != reg)
			continue;
		rdev->scratch.free[i] = true;
		return;
	}
}
361
 
2997 Serge 362
/*
5078 serge 363
 * GPU doorbell aperture helpers function.
364
 */
365
/**
366
 * radeon_doorbell_init - Init doorbell driver information.
367
 *
368
 * @rdev: radeon_device pointer
369
 *
370
 * Init doorbell driver information (CIK)
371
 * Returns 0 on success, error on failure.
372
 */
373
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	/* Cap the doorbell count at what both the BAR and the driver support. */
	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* Map only the doorbells the driver will actually use. */
	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* Start with every doorbell slot unallocated. */
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}
394
 
395
/**
396
 * radeon_doorbell_fini - Tear down doorbell driver information.
397
 *
398
 * @rdev: radeon_device pointer
399
 *
400
 * Tear down doorbell driver information (CIK)
401
 */
402
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	/* Unmap the doorbell aperture and drop the now-stale pointer. */
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
407
 
408
/**
409
 * radeon_doorbell_get - Allocate a doorbell entry
410
 *
411
 * @rdev: radeon_device pointer
412
 * @doorbell: doorbell index
413
 *
414
 * Allocate a doorbell for use by the driver (all asics).
415
 * Returns 0 on success or -EINVAL on failure.
416
 */
417
/* Allocate the lowest free doorbell slot; stores its index in *doorbell.
 * Returns 0 on success or -EINVAL when the aperture is exhausted. */
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long slot = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);

	if (slot >= rdev->doorbell.num_doorbells)
		return -EINVAL;

	__set_bit(slot, rdev->doorbell.used);
	*doorbell = slot;
	return 0;
}
428
 
429
/**
430
 * radeon_doorbell_free - Free a doorbell entry
431
 *
432
 * @rdev: radeon_device pointer
433
 * @doorbell: doorbell index
434
 *
435
 * Free a doorbell allocated for use by the driver (all asics)
436
 */
437
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	/* Out-of-range indices are silently ignored. */
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}
442
 
5271 serge 443
/**
444
 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
445
 *                                setup KFD
446
 *
447
 * @rdev: radeon_device pointer
448
 * @aperture_base: output returning doorbell aperture base physical address
449
 * @aperture_size: output returning doorbell aperture size in bytes
450
 * @start_offset: output returning # of doorbell bytes reserved for radeon.
451
 *
452
 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
453
 * takes doorbells required for its own rings and reports the setup to KFD.
454
 * Radeon reserved doorbells are at the start of the doorbell aperture.
455
 */
456
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/* The first num_doorbells are used by radeon.
	 * KFD takes whatever's left in the aperture. */
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = rdev->doorbell.base;
		*aperture_size = rdev->doorbell.size;
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
	} else {
		/* Nothing left over for KFD: report an empty aperture. */
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
473
 
5078 serge 474
/*
2997 Serge 475
 * radeon_wb_*()
476
 * Writeback is the method by which the GPU updates special pages
477
 * in memory with the status of certain GPU events (fences, ring pointers,
478
 * etc.).
479
 */
480
 
481
/**
482
 * radeon_wb_disable - Disable Writeback
483
 *
484
 * @rdev: radeon_device pointer
485
 *
486
 * Disables Writeback (all asics).  Used for suspend.
487
 */
2004 serge 488
void radeon_wb_disable(struct radeon_device *rdev)
{
	/* Only flips the software flag; the hardware is left untouched. */
	rdev->wb.enabled = false;
}
492
 
2997 Serge 493
/**
494
 * radeon_wb_fini - Disable Writeback and free memory
495
 *
496
 * @rdev: radeon_device pointer
497
 *
498
 * Disables Writeback and frees the Writeback memory (all asics).
499
 * Used at driver shutdown.
500
 */
2004 serge 501
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* Best effort: unmap/unpin only if the BO can be reserved. */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
515
 
2997 Serge 516
/**
517
 * radeon_wb_init- Init Writeback driver info and allocate memory
518
 *
519
 * @rdev: radeon_device pointer
520
 *
521
 * Disables Writeback and frees the Writeback memory (all asics).
522
 * Used at driver startup.
523
 * Returns 0 on success or an -error on failure.
524
 */
2004 serge 525
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* Create/pin/map the writeback page once; later calls reuse it. */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			/* radeon_wb_fini() unrefs the BO created above. */
			radeon_wb_fini(rdev);
			return r;
		}
		/* Pin into GTT so the GPU can DMA status words into the page. */
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
591
 
1430 serge 592
/**
593
 * radeon_vram_location - try to find VRAM location
594
 * @rdev: radeon device structure holding all necessary informations
595
 * @mc: memory controller structure holding memory informations
596
 * @base: base address at which to put VRAM
597
 *
598
 * Function will try to place VRAM at the base address provided
599
 * as parameter (which is so far either PCI aperture address or
600
 * for IGP TOM base address).
601
 *
602
 * If there is not enough space to fit the unvisible VRAM in the 32bits
603
 * address space then we limit the VRAM size to the aperture.
604
 *
605
 * If we are using AGP and if the AGP aperture doesn't allow us to have
606
 * room for all the VRAM than we restrict the VRAM to the PCI aperture
607
 * size and print a warning.
608
 *
609
 * This function will never fails, worst case are limiting VRAM.
610
 *
611
 * Note: GTT start, end, size should be initialized before calling this
612
 * function on AGP platform.
613
 *
1963 serge 614
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
1430 serge 615
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
616
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
617
 * not IGP.
618
 *
619
 * Note: we use mc_vram_size as on some board we need to program the mc to
620
 * cover the whole aperture even if VRAM size is inferior to aperture size
621
 * Novell bug 204882 + along with lots of ubuntu ones
622
 *
623
 * Note: when limiting vram it's safe to overwritte real_vram_size because
624
 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
625
 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
626
 * ones)
627
 *
628
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
629
 * explicitly check for that thought.
630
 *
631
 * FIXME: when reducing VRAM size align new size on power of 2.
1117 serge 632
 */
1430 serge 633
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* Optional user cap on reported VRAM, converted from MiB (0 = no cap). */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		/* VRAM would overflow the MC address space: clamp to aperture. */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* On AGP the VRAM range must not overlap the (already placed) GTT. */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* Recompute the end in case the size was clamped above. */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
1117 serge 656
 
1430 serge 657
/**
658
 * radeon_gtt_location - try to find GTT location
659
 * @rdev: radeon device structure holding all necessary informations
660
 * @mc: memory controller structure holding memory informations
661
 *
662
 * Function will try to place GTT before or after VRAM.
663
 *
664
 * If GTT size is bigger than space left then we adjust GTT size.
665
 * Thus function will never fails.
666
 *
667
 * FIXME: when reducing GTT size align new size on power of 2.
668
 */
669
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* Aligned space available after (af) and before (bf) the VRAM range. */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		/* Place GTT directly below VRAM, aligned down. */
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		/* Place GTT just above VRAM, aligned up. */
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
692
 
693
/*
694
 * GPU helpers function.
695
 */
2997 Serge 696
/**
697
 * radeon_card_posted - check if the hw has already been initialized
698
 *
699
 * @rdev: radeon_device pointer
700
 *
701
 * Check if the asic has been initialized (all asics).
702
 * Used at driver startup.
703
 * Returns true if initialized or false if not.
704
 */
1179 serge 705
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* No display engine at all: go straight to the memsize check. */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	/* A nonzero programmed memory size means init already ran. */
	if (reg)
		return true;

	return false;

}
753
 
2997 Serge 754
/**
755
 * radeon_update_bandwidth_info - update display bandwidth params
756
 *
757
 * @rdev: radeon_device pointer
758
 *
759
 * Used when sclk/mclk are switched or display modes are set.
760
 * params are used to calculate display watermarks (all asics)
761
 */
1963 serge 762
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	/* Convert raw clocks to fixed-point MHz (raw value / 100). */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		/* NOTE(review): the comment above says multiply, but the code
		 * divides by 16 — confirm against the upstream driver. */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
781
 
2997 Serge 782
/**
783
 * radeon_boot_test_post_card - check and possibly initialize the hw
784
 *
785
 * @rdev: radeon_device pointer
786
 *
787
 * Check if the asic is initialized and if not, attempt to initialize
788
 * it (all asics).
789
 * Returns true if initialized or false if not.
790
 */
1321 serge 791
bool radeon_boot_test_post_card(struct radeon_device *rdev)
792
{
793
	if (radeon_card_posted(rdev))
794
		return true;
795
 
796
	if (rdev->bios) {
797
		DRM_INFO("GPU not posted. posting now...\n");
798
		if (rdev->is_atom_bios)
799
			atom_asic_init(rdev->mode_info.atom_context);
800
		else
801
			radeon_combios_asic_init(rdev->ddev);
802
		return true;
803
	} else {
804
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
805
		return false;
806
	}
807
}
808
 
2997 Serge 809
/**
810
 * radeon_dummy_page_init - init dummy page used by the driver
811
 *
812
 * @rdev: radeon_device pointer
813
 *
814
 * Allocate the dummy page used by the driver (all asics).
815
 * This dummy page is used by the driver as a filler for gart entries
816
 * when pages are taken out of the GART
817
 * Returns 0 on sucess, -ENOMEM on failure.
818
 */
1233 serge 819
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* Idempotent: keep the existing page if one was already allocated. */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	/* NOTE(review): the pci_map_page() result is not checked for a mapping
	 * failure — confirm that is acceptable on this platform. */
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	return 0;
}
1117 serge 830
 
2997 Serge 831
/**
832
 * radeon_dummy_page_fini - free dummy page used by the driver
833
 *
834
 * @rdev: radeon_device pointer
835
 *
836
 * Frees the dummy page used by the driver (all asics).
837
 */
1233 serge 838
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;

	/* NOTE(review): the page is only forgotten here, not unmapped or
	 * freed — presumably deliberate in this port; verify no leak. */
	rdev->dummy_page.page = NULL;
}
845
 
846
 
1117 serge 847
/* ATOM accessor methods */
2997 Serge 848
/*
849
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
850
 * driver registers callbacks to access registers and the interpreter
851
 * in the driver parses the tables and executes then to program specific
852
 * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
853
 * atombios.h, and atom.c
854
 */
855
 
856
/**
857
 * cail_pll_read - read PLL register
858
 *
859
 * @info: atom card_info pointer
860
 * @reg: PLL register offset
861
 *
862
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
863
 * Returns the value of the PLL register.
864
 */
1117 serge 865
/* ATOM callback: read a PLL register via the per-asic hook. */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;

	return rdev->pll_rreg(rdev, reg);
}
873
 
2997 Serge 874
/**
875
 * cail_pll_write - write PLL register
876
 *
877
 * @info: atom card_info pointer
878
 * @reg: PLL register offset
879
 * @val: value to write to the pll register
880
 *
881
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
882
 */
1117 serge 883
/* ATOM callback: write a PLL register via the per-asic hook. */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
889
 
2997 Serge 890
/**
891
 * cail_mc_read - read MC (Memory Controller) register
892
 *
893
 * @info: atom card_info pointer
894
 * @reg: MC register offset
895
 *
896
 * Provides an MC register accessor for the atom interpreter (r4xx+).
897
 * Returns the value of the MC register.
898
 */
1117 serge 899
/* ATOM callback: read a memory-controller register via the per-asic hook. */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;

	return rdev->mc_rreg(rdev, reg);
}
907
 
2997 Serge 908
/**
909
 * cail_mc_write - write MC (Memory Controller) register
910
 *
911
 * @info: atom card_info pointer
912
 * @reg: MC register offset
913
 * @val: value to write to the pll register
914
 *
915
 * Provides a MC register accessor for the atom interpreter (r4xx+).
916
 */
1117 serge 917
/* ATOM callback: write a memory-controller register via the per-asic hook. */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
923
 
2997 Serge 924
/**
925
 * cail_reg_write - write MMIO register
926
 *
927
 * @info: atom card_info pointer
928
 * @reg: MMIO register offset
929
 * @val: value to write to the pll register
930
 *
931
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
932
 */
1117 serge 933
/* ATOM callback: MMIO register write; the offset is scaled by 4 before use. */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
939
 
2997 Serge 940
/**
941
 * cail_reg_read - read MMIO register
942
 *
943
 * @info: atom card_info pointer
944
 * @reg: MMIO register offset
945
 *
946
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
947
 * Returns the value of the MMIO register.
948
 */
1117 serge 949
/* ATOM callback: MMIO register read; the offset is scaled by 4 before use. */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;

	return RREG32(reg*4);
}
957
 
2997 Serge 958
/**
959
 * cail_ioreg_write - write IO register
960
 *
961
 * @info: atom card_info pointer
962
 * @reg: IO register offset
963
 * @val: value to write to the pll register
964
 *
965
 * Provides a IO register accessor for the atom interpreter (r4xx+).
966
 */
1963 serge 967
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
968
{
969
	struct radeon_device *rdev = info->dev->dev_private;
970
 
971
	WREG32_IO(reg*4, val);
972
}
973
 
2997 Serge 974
/**
975
 * cail_ioreg_read - read IO register
976
 *
977
 * @info: atom card_info pointer
978
 * @reg: IO register offset
979
 *
980
 * Provides an IO register accessor for the atom interpreter (r4xx+).
981
 * Returns the value of the IO register.
982
 */
1963 serge 983
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
984
{
985
	struct radeon_device *rdev = info->dev->dev_private;
986
	uint32_t r;
987
 
988
	r = RREG32_IO(reg*4);
989
	return r;
990
}
991
 
2997 Serge 992
/**
993
 * radeon_atombios_init - init the driver info and callbacks for atombios
994
 *
995
 * @rdev: radeon_device pointer
996
 *
997
 * Initializes the driver info and register access callbacks for the
998
 * ATOM interpreter (r4xx+).
999
 * Returns 0 on sucess, -ENOMEM on failure.
1000
 * Called at driver startup.
1001
 */
1117 serge 1002
int radeon_atombios_init(struct radeon_device *rdev)
1003
{
1268 serge 1004
	struct card_info *atom_card_info =
1005
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
1117 serge 1006
 
1268 serge 1007
	if (!atom_card_info)
1008
		return -ENOMEM;
1009
 
1010
	rdev->mode_info.atom_card_info = atom_card_info;
1011
	atom_card_info->dev = rdev->ddev;
1012
	atom_card_info->reg_read = cail_reg_read;
1013
	atom_card_info->reg_write = cail_reg_write;
1963 serge 1014
	/* needed for iio ops */
1015
	if (rdev->rio_mem) {
1016
		atom_card_info->ioreg_read = cail_ioreg_read;
1017
		atom_card_info->ioreg_write = cail_ioreg_write;
1018
	} else {
1019
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
1020
		atom_card_info->ioreg_read = cail_reg_read;
1021
		atom_card_info->ioreg_write = cail_reg_write;
1022
	}
1268 serge 1023
	atom_card_info->mc_read = cail_mc_read;
1024
	atom_card_info->mc_write = cail_mc_write;
1025
	atom_card_info->pll_read = cail_pll_read;
1026
	atom_card_info->pll_write = cail_pll_write;
1027
 
1028
	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
3764 Serge 1029
	if (!rdev->mode_info.atom_context) {
1030
		radeon_atombios_fini(rdev);
1031
		return -ENOMEM;
1032
	}
1033
 
1630 serge 1034
	mutex_init(&rdev->mode_info.atom_context->mutex);
5271 serge 1035
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
6104 serge 1036
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
1321 serge 1037
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
6104 serge 1038
	return 0;
1117 serge 1039
}
1040
 
2997 Serge 1041
/**
1042
 * radeon_atombios_fini - free the driver info and callbacks for atombios
1043
 *
1044
 * @rdev: radeon_device pointer
1045
 *
1046
 * Frees the driver info and register access callbacks for the ATOM
1047
 * interpreter (r4xx+).
1048
 * Called at driver shutdown.
1049
 */
1117 serge 1050
void radeon_atombios_fini(struct radeon_device *rdev)
1051
{
1321 serge 1052
	if (rdev->mode_info.atom_context) {
1053
		kfree(rdev->mode_info.atom_context->scratch);
3764 Serge 1054
	}
1119 serge 1055
	kfree(rdev->mode_info.atom_context);
3764 Serge 1056
	rdev->mode_info.atom_context = NULL;
1268 serge 1057
	kfree(rdev->mode_info.atom_card_info);
3764 Serge 1058
	rdev->mode_info.atom_card_info = NULL;
1117 serge 1059
}
1060
 
2997 Serge 1061
/* COMBIOS */
1062
/*
1063
 * COMBIOS is the bios format prior to ATOM. It provides
1064
 * command tables similar to ATOM, but doesn't have a unified
1065
 * parser.  See radeon_combios.c
1066
 */
1067
 
1068
/**
1069
 * radeon_combios_init - init the driver info for combios
1070
 *
1071
 * @rdev: radeon_device pointer
1072
 *
1073
 * Initializes the driver info for combios (r1xx-r3xx).
1074
 * Returns 0 on sucess.
1075
 * Called at driver startup.
1076
 */
1117 serge 1077
int radeon_combios_init(struct radeon_device *rdev)
1078
{
1128 serge 1079
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1117 serge 1080
	return 0;
1081
}
1082
 
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Intentionally empty: combios init allocates nothing that needs freeing.
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
1094
 
2997 Serge 1095
/* if we get transitioned to only one device, take VGA back */
1096
/**
1097
 * radeon_vga_set_decode - enable/disable vga decode
1098
 *
1099
 * @cookie: radeon_device pointer
1100
 * @state: enable/disable vga decode
1101
 *
1102
 * Enable/disable vga decode (all asics).
1103
 * Returns VGA resource flags.
1104
 */
1233 serge 1105
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1106
{
1107
	struct radeon_device *rdev = cookie;
1108
	radeon_vga_set_state(rdev, state);
1109
	if (state)
1110
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1111
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1112
	else
1113
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1114
}
1117 serge 1115
 
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Note: like the classic bit trick, this also accepts 0.
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	/* a power of two shares no set bits with (itself - 1) */
	return !(arg & (arg - 1));
}
1128
 
1129
/**
6104 serge 1130
 * Determine a sensible default GART size according to ASIC family.
1131
 *
1132
 * @family ASIC family name
1133
 */
1134
static int radeon_gart_size_auto(enum radeon_family family)
1135
{
1136
	/* default to a larger gart size on newer asics */
1137
	if (family >= CHIP_TAHITI)
1138
		return 2048;
1139
	else if (family >= CHIP_RV770)
1140
		return 1024;
1141
	else
1142
		return 512;
1143
}
1144
 
1145
/**
2997 Serge 1146
 * radeon_check_arguments - validate module params
1147
 *
1148
 * @rdev: radeon_device pointer
1149
 *
1150
 * Validates certain module parameters and updates
1151
 * the associated values used by the driver (all asics).
1152
 */
1153
static void radeon_check_arguments(struct radeon_device *rdev)
1154
{
1404 serge 1155
	/* vramlimit must be a power of two */
2997 Serge 1156
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
1404 serge 1157
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1158
				radeon_vram_limit);
1159
		radeon_vram_limit = 0;
1160
	}
2997 Serge 1161
 
5078 serge 1162
	if (radeon_gart_size == -1) {
6104 serge 1163
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
5078 serge 1164
	}
1404 serge 1165
	/* gtt size must be power of two and greater or equal to 32M */
2997 Serge 1166
	if (radeon_gart_size < 32) {
5078 serge 1167
		dev_warn(rdev->dev, "gart size (%d) too small\n",
1404 serge 1168
				radeon_gart_size);
6104 serge 1169
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
2997 Serge 1170
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
1404 serge 1171
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1172
				radeon_gart_size);
6104 serge 1173
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
1404 serge 1174
	}
2997 Serge 1175
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1176
 
1404 serge 1177
	/* AGP mode can only be -1, 1, 2, 4, 8 */
1178
	switch (radeon_agpmode) {
1179
	case -1:
1180
	case 0:
1181
	case 1:
1182
	case 2:
1183
	case 4:
1184
	case 8:
1185
		break;
1186
	default:
1187
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1188
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1189
		radeon_agpmode = 0;
1190
		break;
1191
	}
5078 serge 1192
 
1193
	if (!radeon_check_pot_argument(radeon_vm_size)) {
1194
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1195
			 radeon_vm_size);
1196
		radeon_vm_size = 4;
1197
	}
1198
 
1199
	if (radeon_vm_size < 1) {
1200
		dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
1201
			 radeon_vm_size);
1202
		radeon_vm_size = 4;
1203
	}
1204
 
1205
       /*
1206
        * Max GPUVM size for Cayman, SI and CI are 40 bits.
1207
        */
1208
	if (radeon_vm_size > 1024) {
1209
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1210
			 radeon_vm_size);
1211
		radeon_vm_size = 4;
1212
	}
1213
 
1214
	/* defines number of bits in page table versus page directory,
1215
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1216
	 * page table and the remaining bits are in the page directory */
1217
	if (radeon_vm_block_size == -1) {
1218
 
1219
		/* Total bits covered by PD + PTs */
5179 serge 1220
		unsigned bits = ilog2(radeon_vm_size) + 18;
5078 serge 1221
 
1222
		/* Make sure the PD is 4K in size up to 8GB address space.
1223
		   Above that split equal between PD and PTs */
1224
		if (radeon_vm_size <= 8)
1225
			radeon_vm_block_size = bits - 9;
1226
		else
1227
			radeon_vm_block_size = (bits + 3) / 2;
1228
 
1229
	} else if (radeon_vm_block_size < 9) {
1230
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1231
			 radeon_vm_block_size);
1232
		radeon_vm_block_size = 9;
1233
	}
1234
 
1235
	if (radeon_vm_block_size > 24 ||
1236
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1237
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1238
			 radeon_vm_block_size);
1239
		radeon_vm_block_size = 9;
1240
	}
1404 serge 1241
}
1242
 
5078 serge 1243
/**
1244
 * radeon_device_init - initialize the driver
1245
 *
1246
 * @rdev: radeon_device pointer
1247
 * @pdev: drm dev pointer
1248
 * @pdev: pci dev pointer
1249
 * @flags: driver flags
1250
 *
1251
 * Initializes the driver info and hw (all asics).
1252
 * Returns 0 for success or an error on failure.
1253
 * Called at driver startup.
1254
 */
1117 serge 1255
int radeon_device_init(struct radeon_device *rdev,
6104 serge 1256
		       struct drm_device *ddev,
1257
		       struct pci_dev *pdev,
1258
		       uint32_t flags)
1117 serge 1259
{
1963 serge 1260
	int r, i;
1179 serge 1261
	int dma_bits;
5078 serge 1262
	bool runtime = false;
1117 serge 1263
 
6104 serge 1264
	rdev->shutdown = false;
5078 serge 1265
	rdev->dev = &pdev->dev;
6104 serge 1266
	rdev->ddev = ddev;
1267
	rdev->pdev = pdev;
1268
	rdev->flags = flags;
1269
	rdev->family = flags & RADEON_FAMILY_MASK;
1270
	rdev->is_atom_bios = false;
1271
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
5078 serge 1272
	rdev->mc.gtt_size = 512 * 1024 * 1024;
1221 serge 1273
	rdev->accel_working = false;
2997 Serge 1274
	/* set up ring ids */
1275
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1276
		rdev->ring[i].idx = i;
1277
	}
5271 serge 1278
	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
1963 serge 1279
 
2997 Serge 1280
	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1281
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1282
		pdev->subsystem_vendor, pdev->subsystem_device);
1963 serge 1283
 
6104 serge 1284
	/* mutex initialization are all done here so we
1285
	 * can recall function without having locking issues */
2997 Serge 1286
	mutex_init(&rdev->ring_lock);
1630 serge 1287
	mutex_init(&rdev->dc_hw_i2c_mutex);
2997 Serge 1288
	atomic_set(&rdev->ih.lock, 0);
1630 serge 1289
	mutex_init(&rdev->gem.mutex);
1290
	mutex_init(&rdev->pm.mutex);
2997 Serge 1291
	mutex_init(&rdev->gpu_clock_mutex);
5078 serge 1292
	mutex_init(&rdev->srbm_mutex);
5271 serge 1293
	mutex_init(&rdev->grbm_idx_mutex);
5346 serge 1294
	init_rwsem(&rdev->pm.mclk_lock);
1295
	init_rwsem(&rdev->exclusive_lock);
2997 Serge 1296
	init_waitqueue_head(&rdev->irq.vblank_queue);
5271 serge 1297
	mutex_init(&rdev->mn_lock);
1298
//	hash_init(rdev->mn_hash);
2997 Serge 1299
	r = radeon_gem_init(rdev);
1300
	if (r)
1301
		return r;
5078 serge 1302
 
1303
	radeon_check_arguments(rdev);
2997 Serge 1304
	/* Adjust VM size here.
5078 serge 1305
	 * Max GPUVM size for cayman+ is 40 bits.
2997 Serge 1306
	 */
5078 serge 1307
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1117 serge 1308
 
1179 serge 1309
	/* Set asic functions */
1310
	r = radeon_asic_init(rdev);
1404 serge 1311
	if (r)
1179 serge 1312
		return r;
1313
 
1963 serge 1314
	/* all of the newer IGP chips have an internal gart
1315
	 * However some rs4xx report as AGP, so remove that here.
1316
	 */
1317
	if ((rdev->family >= CHIP_RS400) &&
1318
	    (rdev->flags & RADEON_IS_IGP)) {
1319
		rdev->flags &= ~RADEON_IS_AGP;
1320
	}
1321
 
1321 serge 1322
	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1221 serge 1323
		radeon_agp_disable(rdev);
6104 serge 1324
	}
1117 serge 1325
 
3764 Serge 1326
	/* Set the internal MC address mask
1327
	 * This is the max address of the GPU's
1328
	 * internal address space.
1329
	 */
1330
	if (rdev->family >= CHIP_CAYMAN)
1331
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1332
	else if (rdev->family >= CHIP_CEDAR)
1333
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1334
	else
1335
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1336
 
1179 serge 1337
	/* set DMA mask + need_dma32 flags.
1338
	 * PCIE - can handle 40-bits.
2997 Serge 1339
	 * IGP - can handle 40-bits
1179 serge 1340
	 * AGP - generally dma32 is safest
2997 Serge 1341
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1179 serge 1342
	 */
1343
	rdev->need_dma32 = false;
1344
	if (rdev->flags & RADEON_IS_AGP)
1345
		rdev->need_dma32 = true;
2997 Serge 1346
	if ((rdev->flags & RADEON_IS_PCI) &&
1347
	    (rdev->family <= CHIP_RS740))
1179 serge 1348
		rdev->need_dma32 = true;
1117 serge 1349
 
1179 serge 1350
	dma_bits = rdev->need_dma32 ? 32 : 40;
1351
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
6104 serge 1352
	if (r) {
1986 serge 1353
		rdev->need_dma32 = true;
2997 Serge 1354
		dma_bits = 32;
6104 serge 1355
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1356
	}
1117 serge 1357
 
6104 serge 1358
	/* Registers mapping */
1359
	/* TODO: block userspace mapping of io register */
3192 Serge 1360
	spin_lock_init(&rdev->mmio_idx_lock);
5078 serge 1361
	spin_lock_init(&rdev->smc_idx_lock);
1362
	spin_lock_init(&rdev->pll_idx_lock);
1363
	spin_lock_init(&rdev->mc_idx_lock);
1364
	spin_lock_init(&rdev->pcie_idx_lock);
1365
	spin_lock_init(&rdev->pciep_idx_lock);
1366
	spin_lock_init(&rdev->pif_idx_lock);
1367
	spin_lock_init(&rdev->cg_idx_lock);
1368
	spin_lock_init(&rdev->uvd_idx_lock);
1369
	spin_lock_init(&rdev->rcu_idx_lock);
1370
	spin_lock_init(&rdev->didt_idx_lock);
1371
	spin_lock_init(&rdev->end_idx_lock);
1372
	if (rdev->family >= CHIP_BONAIRE) {
1373
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1374
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1375
	} else {
6104 serge 1376
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1377
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
5078 serge 1378
	}
2997 Serge 1379
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
6104 serge 1380
	if (rdev->rmmio == NULL) {
1381
		return -ENOMEM;
1382
	}
1383
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1384
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1117 serge 1385
 
5078 serge 1386
	/* doorbell bar mapping */
1387
	if (rdev->family >= CHIP_BONAIRE)
1388
		radeon_doorbell_init(rdev);
1389
 
2997 Serge 1390
	/* io port mapping */
1391
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1392
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1393
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1394
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1395
			break;
1396
		}
1397
	}
1398
	if (rdev->rio_mem == NULL)
1399
		DRM_ERROR("Unable to find PCI I/O BAR\n");
1400
 
5078 serge 1401
	if (rdev->flags & RADEON_IS_PX)
1402
		radeon_device_handle_px_quirks(rdev);
1403
	if (rdev->flags & RADEON_IS_PX)
1404
		runtime = true;
2997 Serge 1405
 
1179 serge 1406
	r = radeon_init(rdev);
1221 serge 1407
	if (r)
6104 serge 1408
		goto failed;
1117 serge 1409
 
3192 Serge 1410
 
5078 serge 1411
 
1221 serge 1412
	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1413
		/* Acceleration not working on AGP card try again
1414
		 * with fallback to PCI or PCIE GART
1415
		 */
1963 serge 1416
		radeon_asic_reset(rdev);
1221 serge 1417
		radeon_fini(rdev);
1418
		radeon_agp_disable(rdev);
1419
		r = radeon_init(rdev);
1420
		if (r)
6104 serge 1421
			goto failed;
1126 serge 1422
	}
5078 serge 1423
 
5271 serge 1424
//   r = radeon_ib_ring_tests(rdev);
1425
//   if (r)
1426
//       DRM_ERROR("ib ring test failed (%d).\n", r);
1427
 
5078 serge 1428
	if ((radeon_testing & 1)) {
1429
		if (rdev->accel_working)
1430
			radeon_test_moves(rdev);
1431
		else
1432
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1433
	}
1434
	if ((radeon_testing & 2)) {
1435
		if (rdev->accel_working)
1436
			radeon_test_syncing(rdev);
1437
		else
1438
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1439
	}
6104 serge 1440
	if (radeon_benchmarking) {
5078 serge 1441
		if (rdev->accel_working)
6104 serge 1442
			radeon_benchmark(rdev, radeon_benchmarking);
5078 serge 1443
		else
1444
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
6104 serge 1445
	}
1179 serge 1446
	return 0;
6104 serge 1447
 
1448
failed:
1449
	return r;
1117 serge 1450
}
1451
 
2997 Serge 1452
/**
1453
 * radeon_gpu_reset - reset the asic
1454
 *
1455
 * @rdev: radeon device pointer
1456
 *
1457
 * Attempt the reset the GPU if it has hung (all asics).
1458
 * Returns 0 for success or an error on failure.
1459
 */
1460
int radeon_gpu_reset(struct radeon_device *rdev)
1461
{
1462
    unsigned ring_sizes[RADEON_NUM_RINGS];
1463
    uint32_t *ring_data[RADEON_NUM_RINGS];
1179 serge 1464
 
2997 Serge 1465
    bool saved = false;
1466
 
1467
    int i, r;
1468
    int resched;
1469
 
5346 serge 1470
	down_write(&rdev->exclusive_lock);
5078 serge 1471
 
5346 serge 1472
	if (!rdev->needs_reset) {
1473
		up_write(&rdev->exclusive_lock);
1474
		return 0;
1475
	}
1476
 
2997 Serge 1477
    radeon_save_bios_scratch_regs(rdev);
1478
    /* block TTM */
1479
//    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1480
    radeon_suspend(rdev);
1481
 
1482
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1483
        ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1484
                           &ring_data[i]);
1485
        if (ring_sizes[i]) {
1486
            saved = true;
1487
            dev_info(rdev->dev, "Saved %d dwords of commands "
1488
                 "on ring %d.\n", ring_sizes[i], i);
1489
        }
1490
    }
1491
 
1492
    r = radeon_asic_reset(rdev);
1493
    if (!r) {
1494
        dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1495
        radeon_resume(rdev);
1496
    }
1497
 
1498
    radeon_restore_bios_scratch_regs(rdev);
1499
 
1500
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
5271 serge 1501
		if (!r && ring_data[i]) {
2997 Serge 1502
            radeon_ring_restore(rdev, &rdev->ring[i],
1503
                        ring_sizes[i], ring_data[i]);
1504
    } else {
5271 serge 1505
			radeon_fence_driver_force_completion(rdev, i);
2997 Serge 1506
            kfree(ring_data[i]);
1507
        }
1508
    }
1509
 
1510
//    ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1511
    if (r) {
1512
        /* bad news, how to tell it to userspace ? */
1513
        dev_info(rdev->dev, "GPU reset failed\n");
1514
    }
1515
 
5346 serge 1516
	rdev->needs_reset = r == -EAGAIN;
1517
	rdev->in_reset = false;
1518
 
1519
	up_read(&rdev->exclusive_lock);
2997 Serge 1520
    return r;
1521
}
1522
 
1523
 
1117 serge 1524
/*
1525
 * Driver load/unload
1526
 */
1527
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
1528
{
1529
    struct radeon_device *rdev;
1530
    int r;
1531
 
1532
 
1120 serge 1533
    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
1117 serge 1534
    if (rdev == NULL) {
1535
        return -ENOMEM;
1536
    };
1537
 
1538
    dev->dev_private = (void *)rdev;
1539
 
1540
    /* update BUS flag */
5097 serge 1541
    if (drm_pci_device_is_agp(dev)) {
1117 serge 1542
        flags |= RADEON_IS_AGP;
1239 serge 1543
    } else if (drm_device_is_pcie(dev)) {
1544
        flags |= RADEON_IS_PCIE;
1545
    } else {
1546
        flags |= RADEON_IS_PCI;
1547
    }
1117 serge 1548
 
1182 serge 1549
    /* radeon_device_init should report only fatal error
1550
     * like memory allocation failure or iomapping failure,
1551
     * or memory manager initialization failure, it must
1552
     * properly initialize the GPU MC controller and permit
1553
     * VRAM allocation
1554
     */
1117 serge 1555
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
1556
    if (r) {
1182 serge 1557
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
1117 serge 1558
        return r;
1559
    }
1182 serge 1560
    /* Again modeset_init should fail only on fatal error
1561
     * otherwise it should provide enough functionalities
1562
     * for shadowfb to run
1563
     */
5078 serge 1564
    main_device = dev;
1565
 
1246 serge 1566
    if( radeon_modeset )
1567
    {
1268 serge 1568
        r = radeon_modeset_init(rdev);
1569
        if (r) {
1570
            return r;
1571
        }
5078 serge 1572
        init_display_kms(dev, &usermode);
1573
    }
1986 serge 1574
    else
5078 serge 1575
        init_display(rdev, &usermode);
1126 serge 1576
 
1117 serge 1577
    return 0;
5078 serge 1578
}
1117 serge 1579
 
1580
 
1221 serge 1581
 
1117 serge 1582
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
1583
{
1584
    return pci_resource_start(dev->pdev, resource);
1585
}
1586
 
1587
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
1588
{
1589
    return pci_resource_len(dev->pdev, resource);
1590
}
1591
 
1123 serge 1592
 
/*
 * __div64_32 - 64-bit by 32-bit unsigned division helper.
 *
 * Divides *n by base in place (*n receives the quotient) and returns
 * the remainder. Classic shift-and-subtract long division, with a
 * fast path that first reduces the high 32 bits.
 */
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
        uint64_t rem = *n;
        uint64_t b = base;
        uint64_t res = 0, d = 1;
        uint32_t high = rem >> 32;

        /* fast-reduce the upper word first */
        if (high >= base) {
                high /= base;
                res = (uint64_t) high << 32;
                rem -= (uint64_t) (high * base) << 32;
        }

        /* scale the divisor up to just below the remainder */
        while ((int64_t)b > 0 && b < rem) {
                b <<= 1;
                d <<= 1;
        }

        /* subtract scaled divisors back down, accumulating the quotient */
        do {
                if (rem >= b) {
                        rem -= b;
                        res += d;
                }
                b >>= 1;
                d >>= 1;
        } while (d);

        *n = res;
        return rem;
}
1625
 
1239 serge 1626
static struct pci_device_id pciidlist[] = {
1627
    radeon_PCI_IDS
1628
};
1629
 
6104 serge 1630
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
1631
int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
1632
void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
1633
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
1634
				    int *max_error,
1635
				    struct timeval *vblank_time,
1636
				    unsigned flags);
1637
void radeon_gem_object_free(struct drm_gem_object *obj);
5078 serge 1638
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
1639
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
1640
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
1641
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
1239 serge 1642
 
1643
 
5078 serge 1644
static struct drm_driver kms_driver = {
1645
    .driver_features =
1646
        DRIVER_USE_AGP |
1647
        DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
1648
        DRIVER_PRIME | DRIVER_RENDER,
1649
    .load = radeon_driver_load_kms,
1650
//    .open = radeon_driver_open_kms,
1651
//    .preclose = radeon_driver_preclose_kms,
1652
//    .postclose = radeon_driver_postclose_kms,
1653
//    .lastclose = radeon_driver_lastclose_kms,
1654
//    .unload = radeon_driver_unload_kms,
6104 serge 1655
    .get_vblank_counter = radeon_get_vblank_counter_kms,
1656
    .enable_vblank = radeon_enable_vblank_kms,
1657
    .disable_vblank = radeon_disable_vblank_kms,
1658
    .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
1659
    .get_scanout_position = radeon_get_crtc_scanoutpos,
5078 serge 1660
#if defined(CONFIG_DEBUG_FS)
1661
    .debugfs_init = radeon_debugfs_init,
1662
    .debugfs_cleanup = radeon_debugfs_cleanup,
1663
#endif
1664
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
1665
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
1666
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
1667
    .irq_handler = radeon_driver_irq_handler_kms,
1668
//    .ioctls = radeon_ioctls_kms,
6104 serge 1669
    .gem_free_object = radeon_gem_object_free,
5078 serge 1670
//    .gem_open_object = radeon_gem_object_open,
1671
//    .gem_close_object = radeon_gem_object_close,
1672
//    .dumb_create = radeon_mode_dumb_create,
1673
//    .dumb_map_offset = radeon_mode_dumb_mmap,
1674
//    .dumb_destroy = drm_gem_dumb_destroy,
1675
//    .fops = &radeon_driver_kms_fops,
3120 serge 1676
 
5078 serge 1677
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1678
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1679
//    .gem_prime_export = drm_gem_prime_export,
1680
//    .gem_prime_import = drm_gem_prime_import,
1681
//    .gem_prime_pin = radeon_gem_prime_pin,
1682
//    .gem_prime_unpin = radeon_gem_prime_unpin,
1683
//    .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
1684
//    .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
1685
//    .gem_prime_vmap = radeon_gem_prime_vmap,
1686
//    .gem_prime_vunmap = radeon_gem_prime_vunmap,
1239 serge 1687
 
5078 serge 1688
};
2007 serge 1689
 
5078 serge 1690
int ati_init(void)
1239 serge 1691
{
5078 serge 1692
    static pci_dev_t device;
2997 Serge 1693
    const struct pci_device_id  *ent;
5078 serge 1694
    int  err;
1239 serge 1695
 
1696
    ent = find_pci_device(&device, pciidlist);
1697
    if( unlikely(ent == NULL) )
1698
    {
1699
        dbgprintf("device not found\n");
5078 serge 1700
        return -ENODEV;
1239 serge 1701
    };
1702
 
5078 serge 1703
    drm_core_init();
1704
 
1705
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
1239 serge 1706
                                device.pci_dev.device);
1707
 
5078 serge 1708
    kms_driver.driver_features |= DRIVER_MODESET;
3764 Serge 1709
 
5078 serge 1710
    err = drm_get_pci_dev(&device.pci_dev, ent, &kms_driver);
1239 serge 1711
 
1246 serge 1712
    return err;
5078 serge 1713
}
1430 serge 1714