Subversion Repositories Kolibri OS

Rev

Rev 1129 | Rev 1182 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
1117 serge 1
/*
2
 * Copyright 2008 Advanced Micro Devices, Inc.
3
 * Copyright 2008 Red Hat Inc.
4
 * Copyright 2009 Jerome Glisse.
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * copy of this software and associated documentation files (the "Software"),
8
 * to deal in the Software without restriction, including without limitation
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10
 * and/or sell copies of the Software, and to permit persons to whom the
11
 * Software is furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
 * OTHER DEALINGS IN THE SOFTWARE.
23
 *
24
 * Authors: Dave Airlie
25
 *          Alex Deucher
26
 *          Jerome Glisse
27
 */
28
//#include 
1123 serge 29
 
1179 serge 30
#include 
31
#include 
1117 serge 32
#include "radeon_drm.h"
33
#include "radeon_reg.h"
34
#include "radeon.h"
35
#include "radeon_asic.h"
36
#include "atom.h"
37
 
38
#include 
39
 
40
int radeon_dynclks = -1;
1123 serge 41
int radeon_r4xx_atom = 0;
1125 serge 42
int radeon_agpmode   = -1;
1117 serge 43
int radeon_gart_size = 512; /* default gart size */
1123 serge 44
int radeon_benchmarking = 0;
45
int radeon_connector_table = 0;
1179 serge 46
int radeon_tv = 1;
1117 serge 47
 
48
 
49
/*
50
 * Clear GPU surface registers.
51
 */
1179 serge 52
void radeon_surface_init(struct radeon_device *rdev)
1117 serge 53
{
1179 serge 54
    ENTER();
1117 serge 55
 
56
    /* FIXME: check this out */
57
    if (rdev->family < CHIP_R600) {
58
        int i;
59
 
60
        for (i = 0; i < 8; i++) {
61
            WREG32(RADEON_SURFACE0_INFO +
62
                   i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
63
                   0);
64
        }
1179 serge 65
		/* enable surfaces */
66
		WREG32(RADEON_SURFACE_CNTL, 0);
1117 serge 67
    }
68
}
69
 
70
/*
71
 * GPU scratch registers helpers function.
72
 */
1179 serge 73
void radeon_scratch_init(struct radeon_device *rdev)
1117 serge 74
{
75
    int i;
76
 
77
    /* FIXME: check this out */
78
    if (rdev->family < CHIP_R300) {
79
        rdev->scratch.num_reg = 5;
80
    } else {
81
        rdev->scratch.num_reg = 7;
82
    }
83
    for (i = 0; i < rdev->scratch.num_reg; i++) {
84
        rdev->scratch.free[i] = true;
85
        rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
86
    }
87
}
88
 
89
/*
 * Allocate a free scratch register.
 * On success stores its MMIO offset in *reg and returns 0;
 * returns -EINVAL when every scratch register is in use.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int idx;

	for (idx = 0; idx < rdev->scratch.num_reg; idx++) {
		if (!rdev->scratch.free[idx])
			continue;
		/* Claim the first free slot. */
		rdev->scratch.free[idx] = false;
		*reg = rdev->scratch.reg[idx];
		return 0;
	}
	return -EINVAL;
}
102
 
103
/*
 * Return a scratch register (by MMIO offset) to the free pool.
 * An offset that does not belong to the pool is silently ignored.
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int idx;

	for (idx = 0; idx < rdev->scratch.num_reg; idx++) {
		if (rdev->scratch.reg[idx] == reg) {
			rdev->scratch.free[idx] = true;
			break;
		}
	}
}
114
 
115
/*
116
 * MC common functions
117
 */
118
/*
 * Lay out VRAM and GTT apertures in the (32-bit) GPU physical address
 * space.  Whichever of vram_location/gtt_location is already fixed
 * (!= 0xFFFFFFFF sentinel) is kept, and the other aperture is placed
 * around it; if neither is fixed, VRAM goes at 0 with GTT after it.
 * Fills in mc.vram_start/end and mc.gtt_start/end on success.
 * Returns 0, or -EINVAL when the two apertures cannot both fit.
 *
 * NOTE(review): the align-down masks below assume mc.gtt_size and
 * mc.mc_vram_size are powers of two — confirm against the callers.
 */
int radeon_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Some chips have an "issue" with the memory controller, the
	 * location must be aligned to the size. We just align it down,
	 * too bad if we walk over the top of system memory, we don't
	 * use DMA without a remapped anyway.
	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
	 */
	/* FGLRX seems to setup like this, VRAM a 0, then GART.
	 */
	/*
	 * Note: from R6xx the address space is 40bits but here we only
	 * use 32bits (still have to see a card which would exhaust 4G
	 * address space).
	 */
	if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
		/* vram location was already setup try to put gtt after
		 * if it fits */
		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
		/* Align the GTT base up to a multiple of its size. */
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			rdev->mc.gtt_location = tmp;
		} else {
			/* No room after VRAM; try placing GTT at 0 before it. */
			if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
				printk(KERN_ERR "[drm] GTT too big to fit "
				       "before or after vram location.\n");
				return -EINVAL;
			}
			rdev->mc.gtt_location = 0;
		}
	} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
		/* gtt location was already setup try to put vram before
		 * if it fits */
		if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
			rdev->mc.vram_location = 0;
		} else {
			/* Place VRAM after GTT, aligned to the VRAM size. */
			tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
			tmp += (rdev->mc.mc_vram_size - 1);
			tmp &= ~(rdev->mc.mc_vram_size - 1);
			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
				rdev->mc.vram_location = tmp;
			} else {
				printk(KERN_ERR "[drm] vram too big to fit "
				       "before or after GTT location.\n");
				return -EINVAL;
			}
		}
	} else {
		/* Neither aperture fixed: VRAM at 0, GTT right after it. */
		rdev->mc.vram_location = 0;
		tmp = rdev->mc.mc_vram_size;
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		rdev->mc.gtt_location = tmp;
	}
	/* Publish the derived inclusive start/end ranges. */
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
	DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
		 (unsigned)rdev->mc.vram_location,
		 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
	DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
	DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
		 (unsigned)rdev->mc.gtt_location,
		 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
	return 0;
}
187
 
188
 
189
/*
190
 * GPU helpers function.
191
 */
1179 serge 192
bool radeon_card_posted(struct radeon_device *rdev)
1117 serge 193
{
194
	uint32_t reg;
195
 
1179 serge 196
    ENTER();
1117 serge 197
 
198
	/* first check CRTCs */
199
	if (ASIC_IS_AVIVO(rdev)) {
200
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
201
		      RREG32(AVIVO_D2CRTC_CONTROL);
202
		if (reg & AVIVO_CRTC_EN) {
203
			return true;
204
		}
205
	} else {
206
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
207
		      RREG32(RADEON_CRTC2_GEN_CNTL);
208
		if (reg & RADEON_CRTC_EN) {
209
			return true;
210
		}
211
	}
212
 
213
	/* then check MEM_SIZE, in case the crtcs are off */
214
	if (rdev->family >= CHIP_R600)
215
		reg = RREG32(R600_CONFIG_MEMSIZE);
216
	else
217
		reg = RREG32(RADEON_CONFIG_MEMSIZE);
218
 
219
	if (reg)
220
		return true;
221
 
222
	return false;
223
 
224
}
225
 
226
 
227
/*
228
 * Registers accessors functions.
229
 */
230
/*
 * Default read accessor installed before the per-ASIC hooks: any call
 * means a register family was used without being wired up — fatal.
 */
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG_ON(1);
	return 0;	/* unreachable; keeps the signature satisfied */
}
236
 
237
/*
 * Default write accessor installed before the per-ASIC hooks: any call
 * means a register family was used without being wired up — fatal.
 */
void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
	      reg, v);
	BUG_ON(1);
}
243
 
244
/*
 * Install the MC / PLL / PCIE-P register accessors for this ASIC family.
 * All accessors start as the "invalid" trap functions and are then
 * overridden by successively more specific family checks — the checks
 * intentionally cascade (e.g. >= CHIP_RV515 overrides >= CHIP_R420),
 * so their order must be preserved.
 */
void radeon_register_accessor_init(struct radeon_device *rdev)
{
    /* Start with trap accessors so an un-wired family faults loudly. */
    rdev->mc_rreg = &radeon_invalid_rreg;
    rdev->mc_wreg = &radeon_invalid_wreg;
    rdev->pll_rreg = &radeon_invalid_rreg;
    rdev->pll_wreg = &radeon_invalid_wreg;
    rdev->pciep_rreg = &radeon_invalid_rreg;
    rdev->pciep_wreg = &radeon_invalid_wreg;

    /* Don't change order as we are overridding accessor. */
    if (rdev->family < CHIP_RV515) {
		rdev->pcie_reg_mask = 0xff;
	} else {
		rdev->pcie_reg_mask = 0x7ff;
    }
    /* FIXME: not sure here */
    if (rdev->family <= CHIP_R580) {
        rdev->pll_rreg = &r100_pll_rreg;
        rdev->pll_wreg = &r100_pll_wreg;
    }
	if (rdev->family >= CHIP_R420) {
		rdev->mc_rreg = &r420_mc_rreg;
		rdev->mc_wreg = &r420_mc_wreg;
	}
    /* Overrides the R420 hook set just above for RV515 and newer. */
    if (rdev->family >= CHIP_RV515) {
        rdev->mc_rreg = &rv515_mc_rreg;
        rdev->mc_wreg = &rv515_mc_wreg;
    }
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
        rdev->mc_rreg = &rs400_mc_rreg;
        rdev->mc_wreg = &rs400_mc_wreg;
    }
    /* RS6xx/R600 accessors not ported yet in this tree. */
//    if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
//        rdev->mc_rreg = &rs690_mc_rreg;
//        rdev->mc_wreg = &rs690_mc_wreg;
//    }
//    if (rdev->family == CHIP_RS600) {
//        rdev->mc_rreg = &rs600_mc_rreg;
//        rdev->mc_wreg = &rs600_mc_wreg;
//    }
//    if (rdev->family >= CHIP_R600) {
//        rdev->pciep_rreg = &r600_pciep_rreg;
//        rdev->pciep_wreg = &r600_pciep_wreg;
//    }
}
289
 
290
 
291
/*
292
 * ASIC
293
 */
294
/*
 * Select the per-family ASIC function table (rdev->asic) and register
 * accessors for this device.  R3xx PCIE parts additionally get their
 * GART hooks swapped to the rv370 PCIE implementations.
 * Returns 0, or -EINVAL for families this port does not support yet
 * (R600 and newer, plus RS600/RS690/RS740 whose tables are stubbed out).
 */
int radeon_asic_init(struct radeon_device *rdev)
{
    radeon_register_accessor_init(rdev);
	switch (rdev->family) {
	case CHIP_R100:
	case CHIP_RV100:
	case CHIP_RS100:
	case CHIP_RV200:
	case CHIP_RS200:
	case CHIP_R200:
	case CHIP_RV250:
	case CHIP_RS300:
	case CHIP_RV280:
        rdev->asic = &r100_asic;
		break;
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_RV350:
	case CHIP_RV380:
        rdev->asic = &r300_asic;
		/* PCIE R3xx use the rv370 PCIE GART instead of the PCI GART. */
		if (rdev->flags & RADEON_IS_PCIE) {
			rdev->asic->gart_init = &rv370_pcie_gart_init;
			rdev->asic->gart_fini = &rv370_pcie_gart_fini;
			rdev->asic->gart_enable = &rv370_pcie_gart_enable;
			rdev->asic->gart_disable = &rv370_pcie_gart_disable;
			rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
			rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
		}
		break;
	case CHIP_R420:
	case CHIP_R423:
	case CHIP_RV410:
        rdev->asic = &r420_asic;
		break;
	case CHIP_RS400:
	case CHIP_RS480:
       rdev->asic = &rs400_asic;
		break;
	case CHIP_RS600:
		/* rs600 table not ported yet — falls through with asic unset. */
//       rdev->asic = &rs600_asic;
		break;
	case CHIP_RS690:
	case CHIP_RS740:
		/* rs690 table not ported yet — falls through with asic unset. */
//        rdev->asic = &rs690_asic;
		break;
	case CHIP_RV515:
        rdev->asic = &rv515_asic;
		break;
	case CHIP_R520:
	case CHIP_RV530:
	case CHIP_RV560:
	case CHIP_RV570:
	case CHIP_R580:
        rdev->asic = &r520_asic;
		break;
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}
	return 0;
}
365
 
366
 
367
/*
368
 * Wrapper around modesetting bits.
369
 */
370
int radeon_clocks_init(struct radeon_device *rdev)
371
{
372
	int r;
373
 
1179 serge 374
    ENTER();
1117 serge 375
 
376
    r = radeon_static_clocks_init(rdev->ddev);
377
	if (r) {
378
		return r;
379
	}
380
	DRM_INFO("Clocks initialized !\n");
381
	return 0;
382
}
383
 
384
/* Teardown counterpart of radeon_clocks_init(); intentionally empty —
 * clock init allocates no resources that need releasing. */
void radeon_clocks_fini(struct radeon_device *rdev)
{
}
387
 
388
/* ATOM accessor methods */
389
/* ATOM interpreter callback: read a PLL register via the device's
 * installed pll_rreg accessor. */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;

    return rdev->pll_rreg(rdev, reg);
}
397
 
398
/* ATOM interpreter callback: write a PLL register via the device's
 * installed pll_wreg accessor. */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->pll_wreg(rdev, reg, val);
}
404
 
405
/* ATOM interpreter callback: read a memory-controller register via the
 * device's installed mc_rreg accessor. */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;

    return rdev->mc_rreg(rdev, reg);
}
413
 
414
/* ATOM interpreter callback: write a memory-controller register via the
 * device's installed mc_wreg accessor. */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->mc_wreg(rdev, reg, val);
}
420
 
421
/* ATOM interpreter callback: MMIO register write. ATOM passes dword
 * indices, so the index is scaled by 4 to a byte offset. */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    WREG32(reg*4, val);
}
427
 
428
/* ATOM interpreter callback: MMIO register read. ATOM passes dword
 * indices, so the index is scaled by 4 to a byte offset. */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;

    return RREG32(reg*4);
}
436
 
437
/* Callback table handed to the ATOM BIOS interpreter (atom_parse).
 * .dev is filled in at radeon_atombios_init() time, which makes this
 * table effectively single-device. */
static struct card_info atom_card_info = {
    .dev = NULL,
    .reg_read = cail_reg_read,
    .reg_write = cail_reg_write,
    .mc_read = cail_mc_read,
    .mc_write = cail_mc_write,
    .pll_read = cail_pll_read,
    .pll_write = cail_pll_write,
};
446
 
447
/*
 * Parse the ATOM BIOS image and initialize its scratch registers.
 * Always returns 0.
 * NOTE(review): the atom_parse() result is stored without a NULL
 * check — confirm downstream users tolerate a NULL atom_context.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
    ENTER();

    atom_card_info.dev = rdev->ddev;
    rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
    return 0;
}
456
 
457
/* Release the ATOM interpreter context allocated by atom_parse(). */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	kfree(rdev->mode_info.atom_context);
}
461
 
462
/* COMBIOS (legacy, non-ATOM) counterpart of radeon_atombios_init():
 * only the BIOS scratch registers need setting up. Always returns 0. */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
467
 
468
/* Teardown counterpart of radeon_combios_init(); intentionally empty —
 * COMBIOS init allocates nothing. */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
471
 
472
int radeon_modeset_init(struct radeon_device *rdev);
473
void radeon_modeset_fini(struct radeon_device *rdev);
474
 
1179 serge 475
 
1117 serge 476
/*
477
 * Radeon device.
478
 */
479
/*
 * Main device bring-up: fill in the radeon_device struct, pick the ASIC
 * function table, map the register BAR, then (on the legacy init path)
 * run the full BIOS/clock/MC/memory-manager/GART/modeset sequence.
 * The call order below is load-bearing — later steps read state set by
 * earlier ones — so it must not be rearranged.
 * Returns 0 on success or a negative errno from the failing step.
 */
int radeon_device_init(struct radeon_device *rdev,
               struct drm_device *ddev,
               struct pci_dev *pdev,
               uint32_t flags)
{
	int r, ret;
	int dma_bits;

    ENTER();

    DRM_INFO("radeon: Initializing kernel modesetting.\n");
    rdev->shutdown = false;
    rdev->ddev = ddev;
    rdev->pdev = pdev;
    rdev->flags = flags;
    rdev->family = flags & RADEON_FAMILY_MASK;
    rdev->is_atom_bios = false;
    rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
    rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
    rdev->gpu_lockup = false;
    /* mutex initialization are all done here so we
     * can recall function without having locking issues */
 //   mutex_init(&rdev->cs_mutex);
 //   mutex_init(&rdev->ib_pool.mutex);
 //   mutex_init(&rdev->cp.mutex);
 //   rwlock_init(&rdev->fence_drv.lock);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r) {
		return r;
	}

    /* radeon_agpmode == -1 means "disable AGP": reroute the chip to its
     * native PCIE or PCI GART instead. */
    if (radeon_agpmode == -1) {
        rdev->flags &= ~RADEON_IS_AGP;
		if (rdev->family >= CHIP_RV515 ||
            rdev->family == CHIP_RV380 ||
            rdev->family == CHIP_RV410 ||
            rdev->family == CHIP_R423) {
            DRM_INFO("Forcing AGP to PCIE mode\n");
            rdev->flags |= RADEON_IS_PCIE;
			rdev->asic->gart_init = &rv370_pcie_gart_init;
			rdev->asic->gart_fini = &rv370_pcie_gart_fini;
			rdev->asic->gart_enable = &rv370_pcie_gart_enable;
			rdev->asic->gart_disable = &rv370_pcie_gart_disable;
			rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
			rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
        } else {
            DRM_INFO("Forcing AGP to PCI mode\n");
            rdev->flags |= RADEON_IS_PCI;
			rdev->asic->gart_init = &r100_pci_gart_init;
			rdev->asic->gart_fini = &r100_pci_gart_fini;
			rdev->asic->gart_enable = &r100_pci_gart_enable;
			rdev->asic->gart_disable = &r100_pci_gart_disable;
			rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
			rdev->asic->gart_set_page = &r100_pci_gart_set_page;
        }
    }

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
    if (r) {
        /* Non-fatal: continue with whatever mask the platform allows. */
        printk(KERN_WARNING "radeon: No suitable DMA available.\n");
    }

    /* Registers mapping */
    /* TODO: block userspace mapping of io register */
    rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);

    rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);

    /* Map BAR 2 uncached (KolibriOS MapIoMem, PG_SW+PG_NOCACHE). */
    rdev->rmmio =  (void*)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
                                   PG_SW+PG_NOCACHE);

    if (rdev->rmmio == NULL) {
        return -ENOMEM;
    }
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
    DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* radeon_init() may flip new_init_path; the legacy sequence below
	 * runs only when it did not. */
	rdev->new_init_path = false;
	r = radeon_init(rdev);
	if (r) {
		return r;
	}

	if (!rdev->new_init_path) {
    /* Setup errata flags */
    radeon_errata(rdev);
    /* Initialize scratch registers */
    radeon_scratch_init(rdev);
	/* Initialize surface registers */
    radeon_surface_init(rdev);

    /* BIOS*/
    if (!radeon_get_bios(rdev)) {
        if (ASIC_IS_AVIVO(rdev))
            return -EINVAL;
    }
    if (rdev->is_atom_bios) {
        r = radeon_atombios_init(rdev);
        if (r) {
            return r;
        }
    } else {
        r = radeon_combios_init(rdev);
        if (r) {
            return r;
        }
    }
    /* Reset gpu before posting otherwise ATOM will enter infinite loop */
    if (radeon_gpu_reset(rdev)) {
        /* FIXME: what do we want to do here ? */
    }
    /* check if cards are posted or not */
    if (!radeon_card_posted(rdev) && rdev->bios) {
        DRM_INFO("GPU not posted. posting now...\n");
        if (rdev->is_atom_bios) {
            atom_asic_init(rdev->mode_info.atom_context);
        } else {
			radeon_combios_asic_init(rdev->ddev);
        }
    }
		/* Get clock & vram information */
		radeon_get_clock_info(rdev->ddev);
		radeon_vram_info(rdev);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}

	/* Initialize memory controller (also test AGP) */
	r = radeon_mc_init(rdev);
	if (r) {
		return r;
	}
    /* Memory manager */
    r = radeon_object_init(rdev);
    if (r) {
        return r;
    }
		r = radeon_gpu_gart_init(rdev);
		if (r)
			return r;
    /* Initialize GART (initialize after TTM so we can allocate
     * memory through TTM but finalize after TTM) */
    r = radeon_gart_enable(rdev);
		/* NOTE(review): GART/GEM failures below return 0 (success),
		 * apparently treating acceleration as best-effort — confirm. */
		if (r)
			return 0;
        r = radeon_gem_init(rdev);
		if (r)
			return 0;

    /* 1M ring buffer */
//        r = radeon_cp_init(rdev, 1024 * 1024);
//       if (r)
//           return 0;
#if 0
		r = radeon_wb_init(rdev);
		if (r)
			DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
        r = radeon_ib_pool_init(rdev);
		if (r)
			return 0;
		r = radeon_ib_test(rdev);
		if (r)
			return 0;
#endif
		rdev->accel_working = true;
    /* NOTE(review): radeon_modeset_init()'s result is ignored here. */
    r = radeon_modeset_init(rdev);
	}
	DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
//	if (radeon_testing) {
//		radeon_test_moves(rdev);
//    }
//	if (radeon_benchmarking) {
//		radeon_benchmark(rdev);
//    }
	return 0;
}
672
 
1179 serge 673
 
1117 serge 674
/* PCI vendor/device IDs this driver binds to; expanded from the
 * radeon_PCI_IDS macro. */
static struct pci_device_id pciidlist[] = {
    radeon_PCI_IDS
};
677
 
678
 
1179 serge 679
/*
 * KolibriOS driver entry point.  action==1 is the load request; any
 * other action is ignored.  Opens the debug log, enumerates the PCI
 * bus, finds the first matching Radeon device and hands it to
 * drm_get_dev().
 * NOTE(review): always returns 0 (retval is never changed and the
 * drm_get_dev() result in `err` is discarded) — confirm the loader
 * expects that.
 */
u32_t drvEntry(int action, char *cmdline)
{
    struct pci_device_id  *ent;

    dev_t   device;
    int     err;
    u32_t   retval = 0;

    if(action != 1)
        return 0;

    if(!dbg_open("/hd0/2/atikms.log"))
    {
        printf("Can't open /hd0/2/atikms.log\nExit\n");
        return 0;
    }

    if(cmdline)
        dbgprintf("cmdline: %s\n", cmdline);

    enum_pci_devices();

    ent = find_pci_device(&device, pciidlist);

    if( unlikely(ent == NULL) )
    {
        dbgprintf("device not found\n");
        return 0;
    };

    dbgprintf("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    err = drm_get_dev(&device.pci_dev, ent);

    return retval;
};
716
 
717
/*
718
static struct drm_driver kms_driver = {
719
    .driver_features =
720
        DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
721
        DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
722
    .dev_priv_size = 0,
723
    .load = radeon_driver_load_kms,
724
    .firstopen = radeon_driver_firstopen_kms,
725
    .open = radeon_driver_open_kms,
726
    .preclose = radeon_driver_preclose_kms,
727
    .postclose = radeon_driver_postclose_kms,
728
    .lastclose = radeon_driver_lastclose_kms,
729
    .unload = radeon_driver_unload_kms,
730
    .suspend = radeon_suspend_kms,
731
    .resume = radeon_resume_kms,
732
    .get_vblank_counter = radeon_get_vblank_counter_kms,
733
    .enable_vblank = radeon_enable_vblank_kms,
734
    .disable_vblank = radeon_disable_vblank_kms,
735
    .master_create = radeon_master_create_kms,
736
    .master_destroy = radeon_master_destroy_kms,
737
#if defined(CONFIG_DEBUG_FS)
738
    .debugfs_init = radeon_debugfs_init,
739
    .debugfs_cleanup = radeon_debugfs_cleanup,
740
#endif
741
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
742
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
743
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
744
    .irq_handler = radeon_driver_irq_handler_kms,
745
    .reclaim_buffers = drm_core_reclaim_buffers,
746
    .get_map_ofs = drm_core_get_map_ofs,
747
    .get_reg_ofs = drm_core_get_reg_ofs,
748
    .ioctls = radeon_ioctls_kms,
749
    .gem_init_object = radeon_gem_object_init,
750
    .gem_free_object = radeon_gem_object_free,
751
    .dma_ioctl = radeon_dma_ioctl_kms,
752
    .fops = {
753
         .owner = THIS_MODULE,
754
         .open = drm_open,
755
         .release = drm_release,
756
         .ioctl = drm_ioctl,
757
         .mmap = radeon_mmap,
758
         .poll = drm_poll,
759
         .fasync = drm_fasync,
760
#ifdef CONFIG_COMPAT
761
         .compat_ioctl = NULL,
762
#endif
763
    },
764
 
765
    .pci_driver = {
766
         .name = DRIVER_NAME,
767
         .id_table = pciidlist,
768
         .probe = radeon_pci_probe,
769
         .remove = radeon_pci_remove,
770
         .suspend = radeon_pci_suspend,
771
         .resume = radeon_pci_resume,
772
    },
773
 
774
    .name = DRIVER_NAME,
775
    .desc = DRIVER_DESC,
776
    .date = DRIVER_DATE,
777
    .major = KMS_DRIVER_MAJOR,
778
    .minor = KMS_DRIVER_MINOR,
779
    .patchlevel = KMS_DRIVER_PATCHLEVEL,
780
};
781
*/
782
 
783
 
784
/*
785
 * Driver load/unload
786
 */
787
/*
 * KMS load hook: allocate the radeon_device, attach it to the DRM
 * device as dev_private, and run the full device init.
 * Returns 0 on success or the radeon_device_init() error.
 * NOTE(review): rdev is leaked on the failure path — the
 * radeon_device_fini() call is commented out; confirm intentional.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct radeon_device *rdev;
    int r;

    dbgprintf("%s\n",__FUNCTION__);

    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
    if (rdev == NULL) {
        return -ENOMEM;
    };

    dev->dev_private = (void *)rdev;

    /* update BUS flag */
    /* Bus detection is stubbed out in this port: AGP is assumed. */
//    if (drm_device_is_agp(dev)) {
        flags |= RADEON_IS_AGP;
//    } else if (drm_device_is_pcie(dev)) {
//        flags |= RADEON_IS_PCIE;
//    } else {
//        flags |= RADEON_IS_PCI;
//    }

    r = radeon_device_init(rdev, dev, dev->pdev, flags);
    if (r) {
        dbgprintf("Failed to initialize Radeon, disabling IOCTL\n");
//        radeon_device_fini(rdev);
        return r;
    }
    return 0;
}
818
 
819
/*
 * Minimal stand-in for the Linux drm_get_dev(): allocate a drm_device,
 * fill in the PCI identity, load the KMS driver, then set an initial
 * 1024x768 mode.  Most of the upstream DRM plumbing (minors, AGP/PCI
 * enable, driver registration) is stubbed out in this port.
 * Returns 0 on success; on failure frees dev and returns the error.
 * NOTE(review): dev comes from malloc() and is not zeroed — fields
 * other than pdev/pci_device/pci_vendor stay uninitialized; confirm
 * radeon_driver_load_kms()/set_mode() never read them before init.
 */
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct drm_device *dev;
    int ret;

    dbgprintf("%s\n",__FUNCTION__);

    dev = malloc(sizeof(*dev));
    if (!dev)
        return -ENOMEM;

 //   ret = pci_enable_device(pdev);
 //   if (ret)
 //       goto err_g1;

 //   pci_set_master(pdev);

 //   if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
 //       printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
 //       goto err_g2;
 //   }

    dev->pdev = pdev;
    dev->pci_device = pdev->device;
    dev->pci_vendor = pdev->vendor;

 //   if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 //       pci_set_drvdata(pdev, dev);
 //       ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
 //       if (ret)
 //           goto err_g2;
 //   }

 //   if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
 //       goto err_g3;

 //   if (dev->driver->load) {
 //       ret = dev->driver->load(dev, ent->driver_data);
 //       if (ret)
 //           goto err_g4;
 //   }

      ret = radeon_driver_load_kms(dev, ent->driver_data );
      if (ret)
        goto err_g4;

 //   list_add_tail(&dev->driver_item, &driver->device_list);

 //   DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 //        driver->name, driver->major, driver->minor, driver->patchlevel,
 //        driver->date, pci_name(pdev), dev->primary->index);

      set_mode(dev, 1024, 768);

    return 0;

err_g4:
//    drm_put_minor(&dev->primary);
//err_g3:
//    if (drm_core_check_feature(dev, DRIVER_MODESET))
//        drm_put_minor(&dev->control);
//err_g2:
//    pci_disable_device(pdev);
//err_g1:
    free(dev);

    return ret;
}
887
 
888
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
889
{
890
    return pci_resource_start(dev->pdev, resource);
891
}
892
 
893
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
894
{
895
    return pci_resource_len(dev->pdev, resource);
896
}
897
 
1123 serge 898
 
899
/*
 * 64-by-32-bit unsigned division by shift-and-subtract, avoiding the
 * compiler's 64-bit divide helper (the same scheme as the generic
 * Linux do_div fallback).  On return *n holds the quotient and the
 * 32-bit remainder is returned.
 */
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
        uint64_t rem = *n;
        uint64_t divisor = base;
        uint64_t quot = 0;
        uint64_t bit = 1;
        uint32_t high = rem >> 32;

        /* Peel off the quotient bits above position 32 first, using a
         * cheap 32-bit divide. */
        if (high >= base) {
                high /= base;
                quot = (uint64_t) high << 32;
                rem -= (uint64_t) (high * base) << 32;
        }

        /* Scale the divisor up to just below the remainder... */
        while ((int64_t)divisor > 0 && divisor < rem) {
                divisor <<= 1;
                bit <<= 1;
        }

        /* ...then walk it back down, accumulating quotient bits. */
        do {
                if (rem >= divisor) {
                        rem -= divisor;
                        quot += bit;
                }
                divisor >>= 1;
                bit >>= 1;
        } while (bit);

        *n = quot;
        return rem;     /* < base, so the truncation to 32 bits is safe */
}
931