Subversion Repositories Kolibri OS

Rev Author Line No. Line
1117 serge 1
/*
2
 * Copyright 2008 Advanced Micro Devices, Inc.
3
 * Copyright 2008 Red Hat Inc.
4
 * Copyright 2009 Jerome Glisse.
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * copy of this software and associated documentation files (the "Software"),
8
 * to deal in the Software without restriction, including without limitation
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10
 * and/or sell copies of the Software, and to permit persons to whom the
11
 * Software is furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
 * OTHER DEALINGS IN THE SOFTWARE.
23
 *
24
 * Authors: Dave Airlie
25
 *          Alex Deucher
26
 *          Jerome Glisse
27
 */
28
//#include 
2997 Serge 29
#include 
1179 serge 30
#include 
31
#include 
1221 serge 32
#include 
6104 serge 33
#include 
1117 serge 34
#include "radeon_reg.h"
35
#include "radeon.h"
36
#include "atom.h"
3120 serge 37
 
1428 serge 38
#include "display.h"
1117 serge 39
 
3120 serge 40
 
1221 serge 41
#include 
42
 
5078 serge 43
#define PCI_VENDOR_ID_ATI               0x1002
44
#define PCI_VENDOR_ID_APPLE             0x106b
1117 serge 45
 
5078 serge 46
int radeon_no_wb;
1430 serge 47
int radeon_modeset = -1;
48
int radeon_dynclks = -1;
49
int radeon_r4xx_atom = 0;
50
int radeon_agpmode = 0;
51
int radeon_vram_limit = 0;
5078 serge 52
int radeon_gart_size = -1; /* auto */
1430 serge 53
int radeon_benchmarking = 0;
54
int radeon_testing = 0;
55
int radeon_connector_table = 0;
56
int radeon_tv = 1;
5078 serge 57
int radeon_audio = -1;
58
int radeon_disp_priority = 0;
1963 serge 59
int radeon_hw_i2c = 0;
5078 serge 60
int radeon_pcie_gen2 = -1;
61
int radeon_msi = -1;
2997 Serge 62
int radeon_lockup_timeout = 10000;
3764 Serge 63
int radeon_fastfb = 0;
5078 serge 64
int radeon_dpm = -1;
65
int radeon_aspm = -1;
66
int radeon_runtime_pm = -1;
67
int radeon_hard_reset = 0;
68
int radeon_vm_size = 8;
69
int radeon_vm_block_size = -1;
70
int radeon_deep_color = 0;
71
int radeon_use_pflipirq = 2;
2160 serge 72
int irq_override = 0;
5078 serge 73
int radeon_bapm = -1;
5271 serge 74
int radeon_backlight = 0;
6104 serge 75
int radeon_auxch = -1;
76
int radeon_mst = 0;
77
 
5078 serge 78
extern display_t *os_display;
79
extern struct drm_device *main_device;
80
extern videomode_t usermode;
1246 serge 81
 
3120 serge 82
 
1404 serge 83
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
84
int init_display(struct radeon_device *rdev, videomode_t *mode);
5078 serge 85
int init_display_kms(struct drm_device *dev, videomode_t *usermode);
1117 serge 86
 
5271 serge 87
int get_modes(videomode_t *mode, u32 *count);
1404 serge 88
int set_user_mode(videomode_t *mode);
1428 serge 89
int r100_2D_test(struct radeon_device *rdev);
1239 serge 90
 
1404 serge 91
 
1233 serge 92
 /* Legacy VGA regions */
93
#define VGA_RSRC_NONE          0x00
94
#define VGA_RSRC_LEGACY_IO     0x01
95
#define VGA_RSRC_LEGACY_MEM    0x02
96
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
97
/* Non-legacy access */
98
#define VGA_RSRC_NORMAL_IO     0x04
99
#define VGA_RSRC_NORMAL_MEM    0x08
100
 
101
 
1963 serge 102
static const char radeon_family_name[][16] = {
103
	"R100",
104
	"RV100",
105
	"RS100",
106
	"RV200",
107
	"RS200",
108
	"R200",
109
	"RV250",
110
	"RS300",
111
	"RV280",
112
	"R300",
113
	"R350",
114
	"RV350",
115
	"RV380",
116
	"R420",
117
	"R423",
118
	"RV410",
119
	"RS400",
120
	"RS480",
121
	"RS600",
122
	"RS690",
123
	"RS740",
124
	"RV515",
125
	"R520",
126
	"RV530",
127
	"RV560",
128
	"RV570",
129
	"R580",
130
	"R600",
131
	"RV610",
132
	"RV630",
133
	"RV670",
134
	"RV620",
135
	"RV635",
136
	"RS780",
137
	"RS880",
138
	"RV770",
139
	"RV730",
140
	"RV710",
141
	"RV740",
142
	"CEDAR",
143
	"REDWOOD",
144
	"JUNIPER",
145
	"CYPRESS",
146
	"HEMLOCK",
147
	"PALM",
1986 serge 148
	"SUMO",
149
	"SUMO2",
1963 serge 150
	"BARTS",
151
	"TURKS",
152
	"CAICOS",
153
	"CAYMAN",
2997 Serge 154
	"ARUBA",
155
	"TAHITI",
156
	"PITCAIRN",
157
	"VERDE",
3764 Serge 158
	"OLAND",
159
	"HAINAN",
5078 serge 160
	"BONAIRE",
161
	"KAVERI",
162
	"KABINI",
163
	"HAWAII",
164
	"MULLINS",
1963 serge 165
	"LAST",
166
};
1233 serge 167
 
5078 serge 168
#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
169
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
170
 
171
struct radeon_px_quirk {
172
	u32 chip_vendor;
173
	u32 chip_device;
174
	u32 subsys_vendor;
175
	u32 subsys_device;
176
	u32 px_quirk_flags;
177
};
178
 
179
static struct radeon_px_quirk radeon_px_quirk_list[] = {
180
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
181
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
182
	 */
183
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
184
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
185
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
186
	 */
187
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
5179 serge 188
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
189
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
190
	 */
191
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
5078 serge 192
	/* macbook pro 8.2 */
193
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
194
	{ 0, 0, 0, 0, 0 },
195
};
196
 
197
bool radeon_is_px(struct drm_device *dev)
198
{
199
	struct radeon_device *rdev = dev->dev_private;
200
 
201
	if (rdev->flags & RADEON_IS_PX)
202
		return true;
203
	return false;
204
}
205
 
206
static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
207
{
208
	struct radeon_px_quirk *p = radeon_px_quirk_list;
209
 
210
	/* Apply PX quirks */
211
	while (p && p->chip_device != 0) {
212
		if (rdev->pdev->vendor == p->chip_vendor &&
213
		    rdev->pdev->device == p->chip_device &&
214
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
215
		    rdev->pdev->subsystem_device == p->subsys_device) {
216
			rdev->px_quirk_flags = p->px_quirk_flags;
217
			break;
218
		}
219
		++p;
220
	}
221
 
222
	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
223
		rdev->flags &= ~RADEON_IS_PX;
224
}
225
 
2997 Serge 226
/**
3764 Serge 227
 * radeon_program_register_sequence - program an array of registers.
228
 *
229
 * @rdev: radeon_device pointer
230
 * @registers: pointer to the register array
231
 * @array_size: size of the register array
232
 *
233
 * Programs an array of registers with AND and OR masks.
234
 * This is a helper for setting golden registers.
235
 */
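/* Illustrative sketch of how a caller feeds this helper: the table is a flat
 * list of (reg, and_mask, or_mask) triplets, so array_size must be a multiple
 * of 3. The offsets and values below are made up, not real golden settings.
 *
 *     static const u32 example_golden_regs[] = {
 *         0x1234, 0xffffffff, 0x00010000,   // and_mask == ~0: plain write
 *         0x5678, 0x0000ff00, 0x00003400,   // clear masked bits, then OR
 *     };
 *     radeon_program_register_sequence(rdev, example_golden_regs,
 *                                      (u32)ARRAY_SIZE(example_golden_regs));
 */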
236
void radeon_program_register_sequence(struct radeon_device *rdev,
237
				      const u32 *registers,
238
				      const u32 array_size)
239
{
240
	u32 tmp, reg, and_mask, or_mask;
241
	int i;
242
 
243
	if (array_size % 3)
244
		return;
245
 
246
	for (i = 0; i < array_size; i += 3) {
247
		reg = registers[i + 0];
248
		and_mask = registers[i + 1];
249
		or_mask = registers[i + 2];
250
 
251
		if (and_mask == 0xffffffff) {
252
			tmp = or_mask;
253
		} else {
254
			tmp = RREG32(reg);
255
			tmp &= ~and_mask;
256
			tmp |= or_mask;
257
		}
258
		WREG32(reg, tmp);
259
	}
260
}
261
 
5078 serge 262
void radeon_pci_config_reset(struct radeon_device *rdev)
263
{
264
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
265
}
266
 
3764 Serge 267
/**
2997 Serge 268
 * radeon_surface_init - Clear GPU surface registers.
269
 *
270
 * @rdev: radeon_device pointer
271
 *
272
 * Clear GPU surface registers (r1xx-r5xx).
1117 serge 273
 */
1179 serge 274
void radeon_surface_init(struct radeon_device *rdev)
1117 serge 275
{
6104 serge 276
	/* FIXME: check this out */
277
	if (rdev->family < CHIP_R600) {
278
		int i;
1117 serge 279
 
1321 serge 280
		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
5078 serge 281
			if (rdev->surface_regs[i].bo)
282
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
283
			else
6104 serge 284
				radeon_clear_surface_reg(rdev, i);
285
		}
1179 serge 286
		/* enable surfaces */
287
		WREG32(RADEON_SURFACE_CNTL, 0);
6104 serge 288
	}
1117 serge 289
}
290
 
291
/*
292
 * GPU scratch registers helpers function.
293
 */
2997 Serge 294
/**
295
 * radeon_scratch_init - Init scratch register driver information.
296
 *
297
 * @rdev: radeon_device pointer
298
 *
299
 * Init CP scratch register driver information (r1xx-r5xx)
300
 */
1179 serge 301
void radeon_scratch_init(struct radeon_device *rdev)
1117 serge 302
{
6104 serge 303
	int i;
1117 serge 304
 
6104 serge 305
	/* FIXME: check this out */
306
	if (rdev->family < CHIP_R300) {
307
		rdev->scratch.num_reg = 5;
308
	} else {
309
		rdev->scratch.num_reg = 7;
310
	}
1963 serge 311
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
6104 serge 312
	for (i = 0; i < rdev->scratch.num_reg; i++) {
313
		rdev->scratch.free[i] = true;
1963 serge 314
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
6104 serge 315
	}
1117 serge 316
}
317
 
2997 Serge 318
/**
319
 * radeon_scratch_get - Allocate a scratch register
320
 *
321
 * @rdev: radeon_device pointer
322
 * @reg: scratch register mmio offset
323
 *
324
 * Allocate a CP scratch register for use by the driver (all asics).
325
 * Returns 0 on success or -EINVAL on failure.
326
 */
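/* Usage sketch (this mirrors how the ring tests use scratch registers):
 *
 *     uint32_t scratch;
 *     if (radeon_scratch_get(rdev, &scratch) == 0) {
 *         WREG32(scratch, 0xCAFEDEAD);          // CPU writes a test pattern
 *         // ... have the CP write/verify the register from a ring ...
 *         radeon_scratch_free(rdev, scratch);
 *     }
 */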
1117 serge 327
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
328
{
329
	int i;
330
 
331
	for (i = 0; i < rdev->scratch.num_reg; i++) {
332
		if (rdev->scratch.free[i]) {
333
			rdev->scratch.free[i] = false;
334
			*reg = rdev->scratch.reg[i];
335
			return 0;
336
		}
337
	}
338
	return -EINVAL;
339
}
340
 
2997 Serge 341
/**
342
 * radeon_scratch_free - Free a scratch register
343
 *
344
 * @rdev: radeon_device pointer
345
 * @reg: scratch register mmio offset
346
 *
347
 * Free a CP scratch register allocated for use by the driver (all asics)
348
 */
1117 serge 349
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
350
{
351
	int i;
352
 
353
	for (i = 0; i < rdev->scratch.num_reg; i++) {
354
		if (rdev->scratch.reg[i] == reg) {
355
			rdev->scratch.free[i] = true;
356
			return;
357
		}
358
	}
359
}
360
 
2997 Serge 361
/*
5078 serge 362
 * GPU doorbell aperture helpers function.
363
 */
364
/**
365
 * radeon_doorbell_init - Init doorbell driver information.
366
 *
367
 * @rdev: radeon_device pointer
368
 *
369
 * Init doorbell driver information (CIK)
370
 * Returns 0 on success, error on failure.
371
 */
372
static int radeon_doorbell_init(struct radeon_device *rdev)
373
{
374
	/* doorbell bar mapping */
375
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
376
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
377
 
378
	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
379
	if (rdev->doorbell.num_doorbells == 0)
380
		return -EINVAL;
381
 
382
	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
383
	if (rdev->doorbell.ptr == NULL) {
384
		return -ENOMEM;
385
	}
386
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
387
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
388
 
389
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
390
 
391
	return 0;
392
}
393
 
394
/**
395
 * radeon_doorbell_fini - Tear down doorbell driver information.
396
 *
397
 * @rdev: radeon_device pointer
398
 *
399
 * Tear down doorbell driver information (CIK)
400
 */
401
static void radeon_doorbell_fini(struct radeon_device *rdev)
402
{
403
	iounmap(rdev->doorbell.ptr);
404
	rdev->doorbell.ptr = NULL;
405
}
406
 
407
/**
408
 * radeon_doorbell_get - Allocate a doorbell entry
409
 *
410
 * @rdev: radeon_device pointer
411
 * @doorbell: doorbell index
412
 *
413
 * Allocate a doorbell for use by the driver (all asics).
414
 * Returns 0 on success or -EINVAL on failure.
415
 */
416
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
417
{
418
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
419
	if (offset < rdev->doorbell.num_doorbells) {
420
		__set_bit(offset, rdev->doorbell.used);
421
		*doorbell = offset;
422
		return 0;
423
	} else {
424
		return -EINVAL;
425
	}
426
}
427
 
428
/**
429
 * radeon_doorbell_free - Free a doorbell entry
430
 *
431
 * @rdev: radeon_device pointer
432
 * @doorbell: doorbell index
433
 *
434
 * Free a doorbell allocated for use by the driver (all asics)
435
 */
436
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
437
{
438
	if (doorbell < rdev->doorbell.num_doorbells)
439
		__clear_bit(doorbell, rdev->doorbell.used);
440
}
441
 
5271 serge 442
/**
443
 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
444
 *                                setup KFD
445
 *
446
 * @rdev: radeon_device pointer
447
 * @aperture_base: output returning doorbell aperture base physical address
448
 * @aperture_size: output returning doorbell aperture size in bytes
449
 * @start_offset: output returning # of doorbell bytes reserved for radeon.
450
 *
451
 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
452
 * takes doorbells required for its own rings and reports the setup to KFD.
453
 * Radeon reserved doorbells are at the start of the doorbell aperture.
454
 */
455
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
456
				  phys_addr_t *aperture_base,
457
				  size_t *aperture_size,
458
				  size_t *start_offset)
459
{
460
	/* The first num_doorbells are used by radeon.
461
	 * KFD takes whatever's left in the aperture. */
462
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
463
		*aperture_base = rdev->doorbell.base;
464
		*aperture_size = rdev->doorbell.size;
465
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
466
	} else {
467
		*aperture_base = 0;
468
		*aperture_size = 0;
469
		*start_offset = 0;
470
	}
471
}
472
 
5078 serge 473
/*
2997 Serge 474
 * radeon_wb_*()
475
 * Writeback is the method by which the GPU updates special pages
476
 * in memory with the status of certain GPU events (fences, ring pointers,
477
 * etc.).
478
 */
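/* Example of what writeback buys us (illustrative): with wb.enabled, a ring's
 * read pointer comes from the writeback page instead of a slow MMIO read,
 * e.g. the per-ASIC get_rptr helpers do roughly:
 *
 *     rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs / 4]);
 *
 * Fence values are fetched the same way through rdev->wb.wb.
 */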
479
 
480
/**
481
 * radeon_wb_disable - Disable Writeback
482
 *
483
 * @rdev: radeon_device pointer
484
 *
485
 * Disables Writeback (all asics).  Used for suspend.
486
 */
2004 serge 487
void radeon_wb_disable(struct radeon_device *rdev)
488
{
489
	rdev->wb.enabled = false;
490
}
491
 
2997 Serge 492
/**
493
 * radeon_wb_fini - Disable Writeback and free memory
494
 *
495
 * @rdev: radeon_device pointer
496
 *
497
 * Disables Writeback and frees the Writeback memory (all asics).
498
 * Used at driver shutdown.
499
 */
2004 serge 500
void radeon_wb_fini(struct radeon_device *rdev)
501
{
502
	radeon_wb_disable(rdev);
503
	if (rdev->wb.wb_obj) {
5078 serge 504
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
505
			radeon_bo_kunmap(rdev->wb.wb_obj);
506
			radeon_bo_unpin(rdev->wb.wb_obj);
507
			radeon_bo_unreserve(rdev->wb.wb_obj);
508
		}
2004 serge 509
		radeon_bo_unref(&rdev->wb.wb_obj);
510
		rdev->wb.wb = NULL;
511
		rdev->wb.wb_obj = NULL;
512
	}
513
}
514
 
2997 Serge 515
/**
516
 * radeon_wb_init - Init Writeback driver info and allocate memory
517
 *
518
 * @rdev: radeon_device pointer
519
 *
520
 * Initializes Writeback and allocates the Writeback memory (all asics).
521
 * Used at driver startup.
522
 * Returns 0 on success or a negative error code on failure.
523
 */
2004 serge 524
int radeon_wb_init(struct radeon_device *rdev)
525
{
526
	int r;
527
 
528
	if (rdev->wb.wb_obj == NULL) {
529
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
5271 serge 530
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
5078 serge 531
				     &rdev->wb.wb_obj);
2004 serge 532
		if (r) {
533
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
534
			return r;
535
		}
6104 serge 536
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
537
		if (unlikely(r != 0)) {
538
			radeon_wb_fini(rdev);
539
			return r;
540
		}
541
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
542
				&rdev->wb.gpu_addr);
543
		if (r) {
544
			radeon_bo_unreserve(rdev->wb.wb_obj);
545
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
546
			radeon_wb_fini(rdev);
547
			return r;
548
		}
549
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
2004 serge 550
		radeon_bo_unreserve(rdev->wb.wb_obj);
6104 serge 551
		if (r) {
552
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
553
			radeon_wb_fini(rdev);
554
			return r;
555
		}
2004 serge 556
	}
557
 
558
	/* clear wb memory */
559
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
560
	/* disable event_write fences */
561
	rdev->wb.use_event = false;
562
	/* disabled via module param */
2997 Serge 563
	if (radeon_no_wb == 1) {
2004 serge 564
		rdev->wb.enabled = false;
2997 Serge 565
	} else {
566
		if (rdev->flags & RADEON_IS_AGP) {
6104 serge 567
			/* often unreliable on AGP */
2997 Serge 568
			rdev->wb.enabled = false;
569
		} else if (rdev->family < CHIP_R300) {
570
			/* often unreliable on pre-r300 */
571
			rdev->wb.enabled = false;
572
		} else {
2004 serge 573
			rdev->wb.enabled = true;
574
			/* event_write fences are only available on r600+ */
2997 Serge 575
			if (rdev->family >= CHIP_R600) {
2004 serge 576
				rdev->wb.use_event = true;
6104 serge 577
			}
2997 Serge 578
		}
579
	}
580
	/* always use writeback/events on NI, APUs */
581
	if (rdev->family >= CHIP_PALM) {
2004 serge 582
		rdev->wb.enabled = true;
583
		rdev->wb.use_event = true;
584
	}
585
 
586
	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
587
 
588
	return 0;
589
}
590
 
1430 serge 591
/**
592
 * radeon_vram_location - try to find VRAM location
593
 * @rdev: radeon device structure holding all necessary information
594
 * @mc: memory controller structure holding memory information
595
 * @base: base address at which to put VRAM
596
 *
597
 * Function will try to place VRAM at the base address provided
598
 * as parameter (which is so far either PCI aperture address or
599
 * for IGP TOM base address).
600
 *
601
 * If there is not enough space to fit the non-visible VRAM in the 32-bit
602
 * address space then we limit the VRAM size to the aperture.
603
 *
604
 * If we are using AGP and if the AGP aperture doesn't allow us to have
605
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
606
 * size and print a warning.
607
 *
608
 * This function never fails; the worst case is that VRAM gets limited.
609
 *
610
 * Note: GTT start, end, size should be initialized before calling this
611
 * function on AGP platform.
612
 *
1963 serge 613
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
1430 serge 614
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
615
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
616
 * not IGP.
617
 *
618
 * Note: we use mc_vram_size because on some boards we need to program the MC to
619
 * cover the whole aperture even if the VRAM size is smaller than the aperture size
620
 * (Novell bug 204882, along with lots of Ubuntu ones).
621
 *
622
 * Note: when limiting vram it's safe to overwrite real_vram_size because
623
 * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e.
624
 * not affected by the bogus hw of Novell bug 204882, along with lots of Ubuntu
625
 * ones).
626
 *
627
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
628
 * explicitly check for that though.
629
 *
630
 * FIXME: when reducing VRAM size align new size on power of 2.
1117 serge 631
 */
1430 serge 632
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
1117 serge 633
{
2997 Serge 634
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
635
 
1430 serge 636
	mc->vram_start = base;
3764 Serge 637
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
1430 serge 638
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
639
		mc->real_vram_size = mc->aper_size;
640
		mc->mc_vram_size = mc->aper_size;
641
	}
642
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1963 serge 643
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
1430 serge 644
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
645
		mc->real_vram_size = mc->aper_size;
646
		mc->mc_vram_size = mc->aper_size;
6104 serge 647
	}
1430 serge 648
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2997 Serge 649
	if (limit && limit < mc->real_vram_size)
650
		mc->real_vram_size = limit;
1963 serge 651
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
1430 serge 652
			mc->mc_vram_size >> 20, mc->vram_start,
653
			mc->vram_end, mc->real_vram_size >> 20);
654
}
1117 serge 655
 
1430 serge 656
/**
657
 * radeon_gtt_location - try to find GTT location
658
 * @rdev: radeon device structure holding all necessary information
659
 * @mc: memory controller structure holding memory information
660
 *
661
 * Function will try to place GTT before or after VRAM.
662
 *
663
 * If GTT size is bigger than the space left then we adjust the GTT size.
664
 * Thus this function never fails.
665
 *
666
 * FIXME: when reducing GTT size align new size on power of 2.
667
 */
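/* Worked example (assuming gtt_base_align is 0, the non-AGP case): with VRAM
 * at [0, 256MB) and a 32-bit MC address space, size_bf is 0, so a 512MB GTT
 * is placed right after VRAM at [256MB, 768MB). */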
668
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
669
{
670
	u64 size_af, size_bf;
671
 
3764 Serge 672
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
1963 serge 673
	size_bf = mc->vram_start & ~mc->gtt_base_align;
1430 serge 674
	if (size_bf > size_af) {
675
		if (mc->gtt_size > size_bf) {
676
			dev_warn(rdev->dev, "limiting GTT\n");
677
			mc->gtt_size = size_bf;
1117 serge 678
		}
1963 serge 679
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
1430 serge 680
	} else {
681
		if (mc->gtt_size > size_af) {
682
			dev_warn(rdev->dev, "limiting GTT\n");
683
			mc->gtt_size = size_af;
1117 serge 684
		}
1963 serge 685
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
1117 serge 686
	}
1430 serge 687
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
1963 serge 688
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
1430 serge 689
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
1117 serge 690
}
691
 
692
/*
693
 * GPU helpers function.
694
 */
2997 Serge 695
/**
696
 * radeon_card_posted - check if the hw has already been initialized
697
 *
698
 * @rdev: radeon_device pointer
699
 *
700
 * Check if the asic has been initialized (all asics).
701
 * Used at driver startup.
702
 * Returns true if initialized or false if not.
703
 */
1179 serge 704
bool radeon_card_posted(struct radeon_device *rdev)
1117 serge 705
{
706
	uint32_t reg;
707
 
3764 Serge 708
	if (ASIC_IS_NODCE(rdev))
709
		goto check_memsize;
710
 
1117 serge 711
	/* first check CRTCs */
3764 Serge 712
	if (ASIC_IS_DCE4(rdev)) {
1430 serge 713
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
1963 serge 714
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
3764 Serge 715
			if (rdev->num_crtc >= 4) {
716
				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
717
					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
718
			}
719
			if (rdev->num_crtc >= 6) {
720
				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
6104 serge 721
					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
3764 Serge 722
			}
1430 serge 723
		if (reg & EVERGREEN_CRTC_MASTER_EN)
724
			return true;
725
	} else if (ASIC_IS_AVIVO(rdev)) {
1117 serge 726
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
727
		      RREG32(AVIVO_D2CRTC_CONTROL);
728
		if (reg & AVIVO_CRTC_EN) {
729
			return true;
730
		}
731
	} else {
732
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
733
		      RREG32(RADEON_CRTC2_GEN_CNTL);
734
		if (reg & RADEON_CRTC_EN) {
735
			return true;
736
		}
737
	}
738
 
3764 Serge 739
check_memsize:
1117 serge 740
	/* then check MEM_SIZE, in case the crtcs are off */
741
	if (rdev->family >= CHIP_R600)
742
		reg = RREG32(R600_CONFIG_MEMSIZE);
743
	else
744
		reg = RREG32(RADEON_CONFIG_MEMSIZE);
745
 
746
	if (reg)
747
		return true;
748
 
749
	return false;
750
 
751
}
752
 
2997 Serge 753
/**
754
 * radeon_update_bandwidth_info - update display bandwidth params
755
 *
756
 * @rdev: radeon_device pointer
757
 *
758
 * Used when sclk/mclk are switched or display modes are set.
759
 * Params are used to calculate display watermarks (all asics).
760
 */
1963 serge 761
void radeon_update_bandwidth_info(struct radeon_device *rdev)
762
{
763
	fixed20_12 a;
764
	u32 sclk = rdev->pm.current_sclk;
765
	u32 mclk = rdev->pm.current_mclk;
766
 
767
	/* sclk/mclk in Mhz */
6104 serge 768
	a.full = dfixed_const(100);
769
	rdev->pm.sclk.full = dfixed_const(sclk);
770
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
771
	rdev->pm.mclk.full = dfixed_const(mclk);
772
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
1963 serge 773
 
774
	if (rdev->flags & RADEON_IS_IGP) {
775
		a.full = dfixed_const(16);
776
		/* core_bandwidth = sclk(Mhz) * 16 */
777
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
778
	}
779
}
780
 
2997 Serge 781
/**
782
 * radeon_boot_test_post_card - check and possibly initialize the hw
783
 *
784
 * @rdev: radeon_device pointer
785
 *
786
 * Check if the asic is initialized and if not, attempt to initialize
787
 * it (all asics).
788
 * Returns true if initialized or false if not.
789
 */
1321 serge 790
bool radeon_boot_test_post_card(struct radeon_device *rdev)
791
{
792
	if (radeon_card_posted(rdev))
793
		return true;
794
 
795
	if (rdev->bios) {
796
		DRM_INFO("GPU not posted. posting now...\n");
797
		if (rdev->is_atom_bios)
798
			atom_asic_init(rdev->mode_info.atom_context);
799
		else
800
			radeon_combios_asic_init(rdev->ddev);
801
		return true;
802
	} else {
803
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
804
		return false;
805
	}
806
}
807
 
2997 Serge 808
/**
809
 * radeon_dummy_page_init - init dummy page used by the driver
810
 *
811
 * @rdev: radeon_device pointer
812
 *
813
 * Allocate the dummy page used by the driver (all asics).
814
 * This dummy page is used by the driver as a filler for gart entries
815
 * when pages are taken out of the GART
816
 * Returns 0 on success, -ENOMEM on failure.
817
 */
1233 serge 818
int radeon_dummy_page_init(struct radeon_device *rdev)
819
{
1430 serge 820
	if (rdev->dummy_page.page)
821
		return 0;
5078 serge 822
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
1233 serge 823
	if (rdev->dummy_page.page == NULL)
824
		return -ENOMEM;
5078 serge 825
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
826
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
6661 serge 827
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
828
							    RADEON_GART_PAGE_DUMMY);
1233 serge 829
	return 0;
830
}
1117 serge 831
 
2997 Serge 832
/**
833
 * radeon_dummy_page_fini - free dummy page used by the driver
834
 *
835
 * @rdev: radeon_device pointer
836
 *
837
 * Frees the dummy page used by the driver (all asics).
838
 */
1233 serge 839
void radeon_dummy_page_fini(struct radeon_device *rdev)
840
{
841
	if (rdev->dummy_page.page == NULL)
842
		return;
5078 serge 843
 
1233 serge 844
	rdev->dummy_page.page = NULL;
845
}
846
 
847
 
1117 serge 848
/* ATOM accessor methods */
2997 Serge 849
/*
850
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
851
 * driver registers callbacks to access registers and the interpreter
852
 * in the driver parses the tables and executes them to program specific
853
 * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
854
 * atombios.h, and atom.c
855
 */
856
 
857
/**
858
 * cail_pll_read - read PLL register
859
 *
860
 * @info: atom card_info pointer
861
 * @reg: PLL register offset
862
 *
863
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
864
 * Returns the value of the PLL register.
865
 */
1117 serge 866
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
867
{
6104 serge 868
	struct radeon_device *rdev = info->dev->dev_private;
869
	uint32_t r;
1117 serge 870
 
6104 serge 871
	r = rdev->pll_rreg(rdev, reg);
872
	return r;
1117 serge 873
}
874
 
2997 Serge 875
/**
876
 * cail_pll_write - write PLL register
877
 *
878
 * @info: atom card_info pointer
879
 * @reg: PLL register offset
880
 * @val: value to write to the pll register
881
 *
882
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
883
 */
1117 serge 884
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
885
{
6104 serge 886
	struct radeon_device *rdev = info->dev->dev_private;
1117 serge 887
 
6104 serge 888
	rdev->pll_wreg(rdev, reg, val);
1117 serge 889
}
890
 
2997 Serge 891
/**
892
 * cail_mc_read - read MC (Memory Controller) register
893
 *
894
 * @info: atom card_info pointer
895
 * @reg: MC register offset
896
 *
897
 * Provides an MC register accessor for the atom interpreter (r4xx+).
898
 * Returns the value of the MC register.
899
 */
1117 serge 900
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
901
{
6104 serge 902
	struct radeon_device *rdev = info->dev->dev_private;
903
	uint32_t r;
1117 serge 904
 
6104 serge 905
	r = rdev->mc_rreg(rdev, reg);
906
	return r;
1117 serge 907
}
908
 
2997 Serge 909
/**
910
 * cail_mc_write - write MC (Memory Controller) register
911
 *
912
 * @info: atom card_info pointer
913
 * @reg: MC register offset
914
 * @val: value to write to the MC register
915
 *
916
 * Provides an MC register accessor for the atom interpreter (r4xx+).
917
 */
1117 serge 918
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
919
{
6104 serge 920
	struct radeon_device *rdev = info->dev->dev_private;
1117 serge 921
 
6104 serge 922
	rdev->mc_wreg(rdev, reg, val);
1117 serge 923
}
924
 
2997 Serge 925
/**
926
 * cail_reg_write - write MMIO register
927
 *
928
 * @info: atom card_info pointer
929
 * @reg: MMIO register offset
930
 * @val: value to write to the MMIO register
931
 *
932
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
933
 */
1117 serge 934
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
935
{
6104 serge 936
	struct radeon_device *rdev = info->dev->dev_private;
1117 serge 937
 
6104 serge 938
	WREG32(reg*4, val);
1117 serge 939
}
940
 
2997 Serge 941
/**
942
 * cail_reg_read - read MMIO register
943
 *
944
 * @info: atom card_info pointer
945
 * @reg: MMIO register offset
946
 *
947
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
948
 * Returns the value of the MMIO register.
949
 */
1117 serge 950
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
951
{
6104 serge 952
	struct radeon_device *rdev = info->dev->dev_private;
953
	uint32_t r;
1117 serge 954
 
6104 serge 955
	r = RREG32(reg*4);
956
	return r;
1117 serge 957
}
958
 
2997 Serge 959
/**
960
 * cail_ioreg_write - write IO register
961
 *
962
 * @info: atom card_info pointer
963
 * @reg: IO register offset
964
 * @val: value to write to the IO register
965
 *
966
 * Provides an IO register accessor for the atom interpreter (r4xx+).
967
 */
1963 serge 968
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
969
{
970
	struct radeon_device *rdev = info->dev->dev_private;
971
 
972
	WREG32_IO(reg*4, val);
973
}
974
 
2997 Serge 975
/**
976
 * cail_ioreg_read - read IO register
977
 *
978
 * @info: atom card_info pointer
979
 * @reg: IO register offset
980
 *
981
 * Provides an IO register accessor for the atom interpreter (r4xx+).
982
 * Returns the value of the IO register.
983
 */
1963 serge 984
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
985
{
986
	struct radeon_device *rdev = info->dev->dev_private;
987
	uint32_t r;
988
 
989
	r = RREG32_IO(reg*4);
990
	return r;
991
}
992
 
2997 Serge 993
/**
994
 * radeon_atombios_init - init the driver info and callbacks for atombios
995
 *
996
 * @rdev: radeon_device pointer
997
 *
998
 * Initializes the driver info and register access callbacks for the
999
 * ATOM interpreter (r4xx+).
1000
 * Returns 0 on success, -ENOMEM on failure.
1001
 * Called at driver startup.
1002
 */
1117 serge 1003
int radeon_atombios_init(struct radeon_device *rdev)
1004
{
1268 serge 1005
	struct card_info *atom_card_info =
1006
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
1117 serge 1007
 
1268 serge 1008
	if (!atom_card_info)
1009
		return -ENOMEM;
1010
 
1011
	rdev->mode_info.atom_card_info = atom_card_info;
1012
	atom_card_info->dev = rdev->ddev;
1013
	atom_card_info->reg_read = cail_reg_read;
1014
	atom_card_info->reg_write = cail_reg_write;
1963 serge 1015
	/* needed for iio ops */
1016
	if (rdev->rio_mem) {
1017
		atom_card_info->ioreg_read = cail_ioreg_read;
1018
		atom_card_info->ioreg_write = cail_ioreg_write;
1019
	} else {
1020
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
1021
		atom_card_info->ioreg_read = cail_reg_read;
1022
		atom_card_info->ioreg_write = cail_reg_write;
1023
	}
1268 serge 1024
	atom_card_info->mc_read = cail_mc_read;
1025
	atom_card_info->mc_write = cail_mc_write;
1026
	atom_card_info->pll_read = cail_pll_read;
1027
	atom_card_info->pll_write = cail_pll_write;
1028
 
1029
	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
3764 Serge 1030
	if (!rdev->mode_info.atom_context) {
1031
		radeon_atombios_fini(rdev);
1032
		return -ENOMEM;
1033
	}
1034
 
1630 serge 1035
	mutex_init(&rdev->mode_info.atom_context->mutex);
5271 serge 1036
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
6104 serge 1037
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
1321 serge 1038
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
6104 serge 1039
	return 0;
1117 serge 1040
}
1041
 
2997 Serge 1042
/**
1043
 * radeon_atombios_fini - free the driver info and callbacks for atombios
1044
 *
1045
 * @rdev: radeon_device pointer
1046
 *
1047
 * Frees the driver info and register access callbacks for the ATOM
1048
 * interpreter (r4xx+).
1049
 * Called at driver shutdown.
1050
 */
1117 serge 1051
void radeon_atombios_fini(struct radeon_device *rdev)
1052
{
1321 serge 1053
	if (rdev->mode_info.atom_context) {
1054
		kfree(rdev->mode_info.atom_context->scratch);
3764 Serge 1055
	}
1119 serge 1056
	kfree(rdev->mode_info.atom_context);
3764 Serge 1057
	rdev->mode_info.atom_context = NULL;
1268 serge 1058
	kfree(rdev->mode_info.atom_card_info);
3764 Serge 1059
	rdev->mode_info.atom_card_info = NULL;
1117 serge 1060
}
1061
 
2997 Serge 1062
/* COMBIOS */
1063
/*
1064
 * COMBIOS is the bios format prior to ATOM. It provides
1065
 * command tables similar to ATOM, but doesn't have a unified
1066
 * parser.  See radeon_combios.c
1067
 */
1068
 
1069
/**
1070
 * radeon_combios_init - init the driver info for combios
1071
 *
1072
 * @rdev: radeon_device pointer
1073
 *
1074
 * Initializes the driver info for combios (r1xx-r3xx).
1075
 * Returns 0 on success.
1076
 * Called at driver startup.
1077
 */
1117 serge 1078
int radeon_combios_init(struct radeon_device *rdev)
1079
{
1128 serge 1080
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1117 serge 1081
	return 0;
1082
}
1083
 
2997 Serge 1084
/**
1085
 * radeon_combios_fini - free the driver info for combios
1086
 *
1087
 * @rdev: radeon_device pointer
1088
 *
1089
 * Frees the driver info for combios (r1xx-r3xx).
1090
 * Called at driver shutdown.
1091
 */
1117 serge 1092
void radeon_combios_fini(struct radeon_device *rdev)
1093
{
1094
}
1095
 
2997 Serge 1096
/* if we get transitioned to only one device, take VGA back */
1097
/**
1098
 * radeon_vga_set_decode - enable/disable vga decode
1099
 *
1100
 * @cookie: radeon_device pointer
1101
 * @state: enable/disable vga decode
1102
 *
1103
 * Enable/disable vga decode (all asics).
1104
 * Returns VGA resource flags.
1105
 */
1233 serge 1106
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1107
{
1108
	struct radeon_device *rdev = cookie;
1109
	radeon_vga_set_state(rdev, state);
1110
	if (state)
1111
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1112
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1113
	else
1114
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1115
}
1117 serge 1116
 
2997 Serge 1117
/**
1118
 * radeon_check_pot_argument - check that argument is a power of two
1119
 *
1120
 * @arg: value to check
1121
 *
1122
 * Validates that a certain argument is a power of two (all asics).
1123
 * Returns true if argument is valid.
1124
 */
1125
static bool radeon_check_pot_argument(int arg)
1404 serge 1126
{
2997 Serge 1127
	return (arg & (arg - 1)) == 0;
1128
}
1129
 
1130
/**
6104 serge 1131
 * Determine a sensible default GART size according to ASIC family.
1132
 *
1133
 * @family: ASIC family name
1134
 */
1135
static int radeon_gart_size_auto(enum radeon_family family)
1136
{
1137
	/* default to a larger gart size on newer asics */
1138
	if (family >= CHIP_TAHITI)
1139
		return 2048;
1140
	else if (family >= CHIP_RV770)
1141
		return 1024;
1142
	else
1143
		return 512;
1144
}
1145
 
1146
/**
2997 Serge 1147
 * radeon_check_arguments - validate module params
1148
 *
1149
 * @rdev: radeon_device pointer
1150
 *
1151
 * Validates certain module parameters and updates
1152
 * the associated values used by the driver (all asics).
1153
 */
1154
static void radeon_check_arguments(struct radeon_device *rdev)
1155
{
1404 serge 1156
	/* vramlimit must be a power of two */
2997 Serge 1157
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
1404 serge 1158
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1159
				radeon_vram_limit);
1160
		radeon_vram_limit = 0;
1161
	}
2997 Serge 1162
 
5078 serge 1163
	if (radeon_gart_size == -1) {
6104 serge 1164
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
5078 serge 1165
	}
1404 serge 1166
	/* gtt size must be power of two and greater or equal to 32M */
2997 Serge 1167
	if (radeon_gart_size < 32) {
5078 serge 1168
		dev_warn(rdev->dev, "gart size (%d) too small\n",
1404 serge 1169
				radeon_gart_size);
6104 serge 1170
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
2997 Serge 1171
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
1404 serge 1172
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1173
				radeon_gart_size);
6104 serge 1174
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
1404 serge 1175
	}
2997 Serge 1176
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1177
 
1404 serge 1178
	/* AGP mode can only be -1, 1, 2, 4, 8 */
1179
	switch (radeon_agpmode) {
1180
	case -1:
1181
	case 0:
1182
	case 1:
1183
	case 2:
1184
	case 4:
1185
	case 8:
1186
		break;
1187
	default:
1188
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1189
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1190
		radeon_agpmode = 0;
1191
		break;
1192
	}
5078 serge 1193
 
1194
	if (!radeon_check_pot_argument(radeon_vm_size)) {
1195
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1196
			 radeon_vm_size);
1197
		radeon_vm_size = 4;
1198
	}
1199
 
1200
	if (radeon_vm_size < 1) {
1201
		dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
1202
			 radeon_vm_size);
1203
		radeon_vm_size = 4;
1204
	}
1205
 
1206
       /*
1207
        * Max GPUVM size for Cayman, SI and CI is 40 bits.
1208
        */
1209
	if (radeon_vm_size > 1024) {
1210
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1211
			 radeon_vm_size);
1212
		radeon_vm_size = 4;
1213
	}
1214
 
1215
	/* defines the number of bits in the page table versus the page directory;
1216
	 * a page is 4KB so we have a 12-bit offset, a minimum of 9 bits in the
1217
	 * page table, and the remaining bits go to the page directory */
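	/* Example: with the default radeon_vm_size of 8 (GB),
	 * bits = ilog2(8) + 18 = 21 and radeon_vm_block_size = 21 - 9 = 12,
	 * which leaves 9 bits for the page directory (512 entries, i.e. 4K). */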
1218
	if (radeon_vm_block_size == -1) {
1219
 
1220
		/* Total bits covered by PD + PTs */
5179 serge 1221
		unsigned bits = ilog2(radeon_vm_size) + 18;
5078 serge 1222
 
1223
		/* Make sure the PD is 4K in size up to 8GB address space.
1224
		   Above that, split equally between PD and PTs. */
1225
		if (radeon_vm_size <= 8)
1226
			radeon_vm_block_size = bits - 9;
1227
		else
1228
			radeon_vm_block_size = (bits + 3) / 2;
1229
 
1230
	} else if (radeon_vm_block_size < 9) {
1231
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1232
			 radeon_vm_block_size);
1233
		radeon_vm_block_size = 9;
1234
	}
1235
 
1236
	if (radeon_vm_block_size > 24 ||
1237
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1238
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1239
			 radeon_vm_block_size);
1240
		radeon_vm_block_size = 9;
1241
	}
1404 serge 1242
}
1243
 
5078 serge 1244
/**
1245
 * radeon_device_init - initialize the driver
1246
 *
1247
 * @rdev: radeon_device pointer
1248
 * @ddev: drm dev pointer
1249
 * @pdev: pci dev pointer
1250
 * @flags: driver flags
1251
 *
1252
 * Initializes the driver info and hw (all asics).
1253
 * Returns 0 for success or an error on failure.
1254
 * Called at driver startup.
1255
 */
1117 serge 1256
int radeon_device_init(struct radeon_device *rdev,
6104 serge 1257
		       struct drm_device *ddev,
1258
		       struct pci_dev *pdev,
1259
		       uint32_t flags)
1117 serge 1260
{
1963 serge 1261
	int r, i;
1179 serge 1262
	int dma_bits;
5078 serge 1263
	bool runtime = false;
1117 serge 1264
 
6104 serge 1265
	rdev->shutdown = false;
5078 serge 1266
	rdev->dev = &pdev->dev;
6104 serge 1267
	rdev->ddev = ddev;
1268
	rdev->pdev = pdev;
1269
	rdev->flags = flags;
1270
	rdev->family = flags & RADEON_FAMILY_MASK;
1271
	rdev->is_atom_bios = false;
1272
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
5078 serge 1273
	rdev->mc.gtt_size = 512 * 1024 * 1024;
1221 serge 1274
	rdev->accel_working = false;
2997 Serge 1275
	/* set up ring ids */
1276
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1277
		rdev->ring[i].idx = i;
1278
	}
5271 serge 1279
	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
1963 serge 1280
 
2997 Serge 1281
	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1282
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1283
		pdev->subsystem_vendor, pdev->subsystem_device);
1963 serge 1284
 
6104 serge 1285
	/* mutex initializations are all done here so we
1286
	 * can recall functions without locking issues */
2997 Serge 1287
	mutex_init(&rdev->ring_lock);
1630 serge 1288
	mutex_init(&rdev->dc_hw_i2c_mutex);
2997 Serge 1289
	atomic_set(&rdev->ih.lock, 0);
1630 serge 1290
	mutex_init(&rdev->gem.mutex);
1291
	mutex_init(&rdev->pm.mutex);
2997 Serge 1292
	mutex_init(&rdev->gpu_clock_mutex);
5078 serge 1293
	mutex_init(&rdev->srbm_mutex);
5271 serge 1294
	mutex_init(&rdev->grbm_idx_mutex);
5346 serge 1295
	init_rwsem(&rdev->pm.mclk_lock);
1296
	init_rwsem(&rdev->exclusive_lock);
2997 Serge 1297
	init_waitqueue_head(&rdev->irq.vblank_queue);
5271 serge 1298
	mutex_init(&rdev->mn_lock);
1299
//	hash_init(rdev->mn_hash);
2997 Serge 1300
	r = radeon_gem_init(rdev);
1301
	if (r)
1302
		return r;
5078 serge 1303
 
1304
	radeon_check_arguments(rdev);
2997 Serge 1305
	/* Adjust VM size here.
5078 serge 1306
	 * Max GPUVM size for cayman+ is 40 bits.
2997 Serge 1307
	 */
5078 serge 1308
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1117 serge 1309
 
1179 serge 1310
	/* Set asic functions */
1311
	r = radeon_asic_init(rdev);
1404 serge 1312
	if (r)
1179 serge 1313
		return r;
1314
 
1963 serge 1315
	/* all of the newer IGP chips have an internal gart
1316
	 * However some rs4xx report as AGP, so remove that here.
1317
	 */
1318
	if ((rdev->family >= CHIP_RS400) &&
1319
	    (rdev->flags & RADEON_IS_IGP)) {
1320
		rdev->flags &= ~RADEON_IS_AGP;
1321
	}
1322
 
1321 serge 1323
	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1221 serge 1324
		radeon_agp_disable(rdev);
6104 serge 1325
	}
1117 serge 1326
 
3764 Serge 1327
	/* Set the internal MC address mask
1328
	 * This is the max address of the GPU's
1329
	 * internal address space.
1330
	 */
1331
	if (rdev->family >= CHIP_CAYMAN)
1332
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1333
	else if (rdev->family >= CHIP_CEDAR)
1334
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1335
	else
1336
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1337
 
1179 serge 1338
	/* set DMA mask + need_dma32 flags.
1339
	 * PCIE - can handle 40-bits.
2997 Serge 1340
	 * IGP - can handle 40-bits
1179 serge 1341
	 * AGP - generally dma32 is safest
2997 Serge 1342
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1179 serge 1343
	 */
1344
	rdev->need_dma32 = false;
1345
	if (rdev->flags & RADEON_IS_AGP)
1346
		rdev->need_dma32 = true;
2997 Serge 1347
	if ((rdev->flags & RADEON_IS_PCI) &&
1348
	    (rdev->family <= CHIP_RS740))
1179 serge 1349
		rdev->need_dma32 = true;
1117 serge 1350
 
1179 serge 1351
	dma_bits = rdev->need_dma32 ? 32 : 40;
1352
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
6104 serge 1353
	if (r) {
1986 serge 1354
		rdev->need_dma32 = true;
2997 Serge 1355
		dma_bits = 32;
6104 serge 1356
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1357
	}
1117 serge 1358
 
6104 serge 1359
	/* Registers mapping */
1360
	/* TODO: block userspace mapping of io register */
3192 Serge 1361
	spin_lock_init(&rdev->mmio_idx_lock);
5078 serge 1362
	spin_lock_init(&rdev->smc_idx_lock);
1363
	spin_lock_init(&rdev->pll_idx_lock);
1364
	spin_lock_init(&rdev->mc_idx_lock);
1365
	spin_lock_init(&rdev->pcie_idx_lock);
1366
	spin_lock_init(&rdev->pciep_idx_lock);
1367
	spin_lock_init(&rdev->pif_idx_lock);
1368
	spin_lock_init(&rdev->cg_idx_lock);
1369
	spin_lock_init(&rdev->uvd_idx_lock);
1370
	spin_lock_init(&rdev->rcu_idx_lock);
1371
	spin_lock_init(&rdev->didt_idx_lock);
1372
	spin_lock_init(&rdev->end_idx_lock);
1373
	if (rdev->family >= CHIP_BONAIRE) {
1374
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1375
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1376
	} else {
6104 serge 1377
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1378
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
5078 serge 1379
	}
2997 Serge 1380
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
6104 serge 1381
	if (rdev->rmmio == NULL) {
1382
		return -ENOMEM;
1383
	}
1384
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1385
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1117 serge 1386
 
5078 serge 1387
	/* doorbell bar mapping */
1388
	if (rdev->family >= CHIP_BONAIRE)
1389
		radeon_doorbell_init(rdev);
1390
 
2997 Serge 1391
	/* io port mapping */
1392
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1393
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1394
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1395
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1396
			break;
1397
		}
1398
	}
1399
	if (rdev->rio_mem == NULL)
1400
		DRM_ERROR("Unable to find PCI I/O BAR\n");
1401
 
5078 serge 1402
	if (rdev->flags & RADEON_IS_PX)
1403
		radeon_device_handle_px_quirks(rdev);
1404
	if (rdev->flags & RADEON_IS_PX)
1405
		runtime = true;
2997 Serge 1406
 
1179 serge 1407
	r = radeon_init(rdev);
1221 serge 1408
	if (r)
6104 serge 1409
		goto failed;
1117 serge 1410
 
3192 Serge 1411
 
5078 serge 1412
 
1221 serge 1413
	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1414
		/* Acceleration not working on AGP card try again
1415
		 * with fallback to PCI or PCIE GART
1416
		 */
1963 serge 1417
		radeon_asic_reset(rdev);
1221 serge 1418
		radeon_fini(rdev);
1419
		radeon_agp_disable(rdev);
1420
		r = radeon_init(rdev);
1421
		if (r)
6104 serge 1422
			goto failed;
1126 serge 1423
	}
5078 serge 1424
 
5271 serge 1425
//   r = radeon_ib_ring_tests(rdev);
1426
//   if (r)
1427
//       DRM_ERROR("ib ring test failed (%d).\n", r);
1428
 
5078 serge 1429
	if ((radeon_testing & 1)) {
1430
		if (rdev->accel_working)
1431
			radeon_test_moves(rdev);
1432
		else
1433
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1434
	}
1435
	if ((radeon_testing & 2)) {
1436
		if (rdev->accel_working)
1437
			radeon_test_syncing(rdev);
1438
		else
1439
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1440
	}
6104 serge 1441
	if (radeon_benchmarking) {
5078 serge 1442
		if (rdev->accel_working)
6104 serge 1443
			radeon_benchmark(rdev, radeon_benchmarking);
5078 serge 1444
		else
1445
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
6104 serge 1446
	}
1179 serge 1447
	return 0;
6104 serge 1448
 
1449
failed:
1450
	return r;
1117 serge 1451
}
1452
 
2997 Serge 1453
/**
1454
 * radeon_gpu_reset - reset the asic
1455
 *
1456
 * @rdev: radeon device pointer
1457
 *
1458
 * Attempt the reset the GPU if it has hung (all asics).
1459
 * Returns 0 for success or an error on failure.
1460
 */
1461
int radeon_gpu_reset(struct radeon_device *rdev)
1462
{
1463
    unsigned ring_sizes[RADEON_NUM_RINGS];
1464
    uint32_t *ring_data[RADEON_NUM_RINGS];
1179 serge 1465
 
2997 Serge 1466
    bool saved = false;
1467
 
1468
    int i, r;
1469
    int resched;
1470
 
5346 serge 1471
	down_write(&rdev->exclusive_lock);
5078 serge 1472
 
5346 serge 1473
	if (!rdev->needs_reset) {
1474
		up_write(&rdev->exclusive_lock);
1475
		return 0;
1476
	}
1477
 
6661 serge 1478
	atomic_inc(&rdev->gpu_reset_counter);
1479
 
2997 Serge 1480
    radeon_save_bios_scratch_regs(rdev);
1481
    /* block TTM */
1482
//    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1483
    radeon_suspend(rdev);
1484
 
1485
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1486
        ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1487
                           &ring_data[i]);
1488
        if (ring_sizes[i]) {
1489
            saved = true;
1490
            dev_info(rdev->dev, "Saved %d dwords of commands "
1491
                 "on ring %d.\n", ring_sizes[i], i);
1492
        }
1493
    }
1494
 
1495
    r = radeon_asic_reset(rdev);
1496
    if (!r) {
1497
        dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1498
        radeon_resume(rdev);
1499
    }
1500
 
1501
    radeon_restore_bios_scratch_regs(rdev);
1502
 
1503
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
5271 serge 1504
        if (!r && ring_data[i]) {
2997 Serge 1505
            radeon_ring_restore(rdev, &rdev->ring[i],
1506
                        ring_sizes[i], ring_data[i]);
1507
        } else {
5271 serge 1508
            radeon_fence_driver_force_completion(rdev, i);
2997 Serge 1509
            kfree(ring_data[i]);
1510
        }
1511
    }
1512
 
1513
//    ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1514
    if (r) {
1515
        /* bad news, how to tell it to userspace ? */
1516
        dev_info(rdev->dev, "GPU reset failed\n");
1517
    }
1518
 
5346 serge 1519
	rdev->needs_reset = r == -EAGAIN;
1520
	rdev->in_reset = false;
1521
 
1522
	up_write(&rdev->exclusive_lock);
2997 Serge 1523
    return r;
1524
}
1525
 
1526
 
1117 serge 1527
/*
1528
 * Driver load/unload
1529
 */
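/**
 * radeon_driver_load_kms - driver load callback
 *
 * @dev: drm dev pointer
 * @flags: device flags (chip family, bus type, ...)
 *
 * Allocates the radeon_device, detects the bus type (AGP/PCIE/PCI),
 * initializes the hardware via radeon_device_init() and then brings up
 * the display, either through KMS or the legacy path of this port.
 * Returns 0 on success or a negative error code on failure.
 */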
1530
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
1531
{
1532
    struct radeon_device *rdev;
1533
    int r;
1534
 
1535
 
1120 serge 1536
    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
1117 serge 1537
    if (rdev == NULL) {
1538
        return -ENOMEM;
1539
    };
1540
 
1541
    dev->dev_private = (void *)rdev;
1542
 
1543
    /* update BUS flag */
5097 serge 1544
    if (drm_pci_device_is_agp(dev)) {
1117 serge 1545
        flags |= RADEON_IS_AGP;
1239 serge 1546
    } else if (drm_device_is_pcie(dev)) {
1547
        flags |= RADEON_IS_PCIE;
1548
    } else {
1549
        flags |= RADEON_IS_PCI;
1550
    }
1117 serge 1551
 
1182 serge 1552
    /* radeon_device_init should report only fatal error
1553
     * like memory allocation failure or iomapping failure,
1554
     * or memory manager initialization failure, it must
1555
     * properly initialize the GPU MC controller and permit
1556
     * VRAM allocation
1557
     */
1117 serge 1558
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
1559
    if (r) {
1182 serge 1560
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
1117 serge 1561
        return r;
1562
    }
1182 serge 1563
    /* Again modeset_init should fail only on fatal error
1564
     * otherwise it should provide enough functionalities
1565
     * for shadowfb to run
1566
     */
5078 serge 1567
    main_device = dev;
1568
 
1246 serge 1569
    if( radeon_modeset )
1570
    {
1268 serge 1571
        r = radeon_modeset_init(rdev);
1572
        if (r) {
1573
            return r;
1574
        }
5078 serge 1575
        init_display_kms(dev, &usermode);
1576
    }
1986 serge 1577
    else
5078 serge 1578
        init_display(rdev, &usermode);
1126 serge 1579
 
1117 serge 1580
    return 0;
5078 serge 1581
}
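/* Helpers for this port: return the base address and length of a PCI BAR
 * of the device backing @dev. */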
1117 serge 1582
 
1583
 
1221 serge 1584
 
1117 serge 1585
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
1586
{
1587
    return pci_resource_start(dev->pdev, resource);
1588
}
1589
 
1590
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
1591
{
1592
    return pci_resource_len(dev->pdev, resource);
1593
}
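/* 64-bit by 32-bit division helper used by this port: plain shift-and-
 * subtract long division. On return *n holds the quotient and the
 * remainder is returned. */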
1594
 
1123 serge 1595
 
1596
uint32_t __div64_32(uint64_t *n, uint32_t base)
1597
{
1598
        uint64_t rem = *n;
1599
        uint64_t b = base;
1600
        uint64_t res, d = 1;
1601
        uint32_t high = rem >> 32;
1602
 
1603
        /* Reduce the thing a bit first */
1604
        res = 0;
1605
        if (high >= base) {
1606
                high /= base;
1607
                res = (uint64_t) high << 32;
1608
                rem -= (uint64_t) (high*base) << 32;
1609
        }
1610
 
1611
        while ((int64_t)b > 0 && b < rem) {
1612
                b = b+b;
1613
                d = d+d;
1614
        }
1615
 
1616
        do {
1617
                if (rem >= b) {
1618
                        rem -= b;
1619
                        res += d;
1620
                }
1621
                b >>= 1;
1622
                d >>= 1;
1623
        } while (d);
1624
 
1625
        *n = res;
1626
        return rem;
1627
}
1628
 
1239 serge 1629
static struct pci_device_id pciidlist[] = {
1630
    radeon_PCI_IDS
1631
};
1632
 
6104 serge 1633
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
1634
int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
1635
void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
1636
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
1637
				    int *max_error,
1638
				    struct timeval *vblank_time,
1639
				    unsigned flags);
1640
void radeon_gem_object_free(struct drm_gem_object *obj);
5078 serge 1641
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
1642
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
1643
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
1644
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
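/* DRM driver descriptor for the KMS path of this port. Only the vblank,
 * IRQ and GEM-free hooks are wired up; the file-ops, ioctl and PRIME
 * callbacks are left commented out. */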
1239 serge 1645
 
1646
 
5078 serge 1647
static struct drm_driver kms_driver = {
1648
    .driver_features =
1649
        DRIVER_USE_AGP |
1650
        DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
1651
        DRIVER_PRIME | DRIVER_RENDER,
1652
    .load = radeon_driver_load_kms,
1653
//    .open = radeon_driver_open_kms,
1654
//    .preclose = radeon_driver_preclose_kms,
1655
//    .postclose = radeon_driver_postclose_kms,
1656
//    .lastclose = radeon_driver_lastclose_kms,
1657
//    .unload = radeon_driver_unload_kms,
6104 serge 1658
    .get_vblank_counter = radeon_get_vblank_counter_kms,
1659
    .enable_vblank = radeon_enable_vblank_kms,
1660
    .disable_vblank = radeon_disable_vblank_kms,
1661
    .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
1662
    .get_scanout_position = radeon_get_crtc_scanoutpos,
5078 serge 1663
#if defined(CONFIG_DEBUG_FS)
1664
    .debugfs_init = radeon_debugfs_init,
1665
    .debugfs_cleanup = radeon_debugfs_cleanup,
1666
#endif
1667
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
1668
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
1669
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
1670
    .irq_handler = radeon_driver_irq_handler_kms,
1671
//    .ioctls = radeon_ioctls_kms,
6104 serge 1672
    .gem_free_object = radeon_gem_object_free,
5078 serge 1673
//    .gem_open_object = radeon_gem_object_open,
1674
//    .gem_close_object = radeon_gem_object_close,
1675
//    .dumb_create = radeon_mode_dumb_create,
1676
//    .dumb_map_offset = radeon_mode_dumb_mmap,
1677
//    .dumb_destroy = drm_gem_dumb_destroy,
1678
//    .fops = &radeon_driver_kms_fops,
3120 serge 1679
 
5078 serge 1680
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1681
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1682
//    .gem_prime_export = drm_gem_prime_export,
1683
//    .gem_prime_import = drm_gem_prime_import,
1684
//    .gem_prime_pin = radeon_gem_prime_pin,
1685
//    .gem_prime_unpin = radeon_gem_prime_unpin,
1686
//    .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
1687
//    .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
1688
//    .gem_prime_vmap = radeon_gem_prime_vmap,
1689
//    .gem_prime_vunmap = radeon_gem_prime_vunmap,
1239 serge 1690
 
5078 serge 1691
};
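/* KolibriOS driver entry point: scan the PCI bus for a supported Radeon
 * device from pciidlist, bring up the DRM core, force KMS and register
 * the device with kms_driver. Returns 0 on success or a negative errno. */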
2007 serge 1692
 
5078 serge 1693
int ati_init(void)
1239 serge 1694
{
5078 serge 1695
    static pci_dev_t device;
2997 Serge 1696
    const struct pci_device_id  *ent;
5078 serge 1697
    int  err;
1239 serge 1698
 
1699
    ent = find_pci_device(&device, pciidlist);
1700
    if( unlikely(ent == NULL) )
1701
    {
1702
        dbgprintf("device not found\n");
5078 serge 1703
        return -ENODEV;
1239 serge 1704
    };
1705
 
5078 serge 1706
    drm_core_init();
1707
 
1708
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
1239 serge 1709
                                device.pci_dev.device);
1710
 
5078 serge 1711
    kms_driver.driver_features |= DRIVER_MODESET;
3764 Serge 1712
 
5078 serge 1713
    err = drm_get_pci_dev(&device.pci_dev, ent, &kms_driver);
1239 serge 1714
 
1246 serge 1715
    return err;
5078 serge 1716
}
1430 serge 1717