/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

/* r520,rv530,rv560,rv570,r580 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int rv370_pcie_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);
void rs600_mc_disable_clients(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);
int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
int rv515_debugfs_ga_info_init(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * r520,rv530,rv560,rv570,r580
 *
 * Some of these functions might be used by newer ASICs.
 */
void r520_gpu_init(struct radeon_device *rdev);
int r520_mc_wait_for_idle(struct radeon_device *rdev);

/*
 * MC
 */
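/* Port note: r520_mc_init() quiesces the chip (gpu_init + PCIE GART off),
 * picks VRAM/GTT locations in the GPU address space (the AGP base when AGP
 * is usable, otherwise via radeon_mc_setup()), then programs the MC
 * registers with the chosen ranges. */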
int r520_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

    dbgprintf("%s\n",__FUNCTION__);

//   if (r100_debugfs_rbbm_init(rdev)) {
//       DRM_ERROR("Failed to register debugfs file for RBBM !\n");
//   }
//   if (rv515_debugfs_pipes_info_init(rdev)) {
//       DRM_ERROR("Failed to register debugfs file for pipes !\n");
//   }
//   if (rv515_debugfs_ga_info_init(rdev)) {
//       DRM_ERROR("Failed to register debugfs file for GA !\n");
//   }

	r520_gpu_init(rdev);
	rv370_pcie_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
    rs600_mc_disable_clients(rdev);
    if (r520_mc_wait_for_idle(rdev)) {
       printk(KERN_WARNING "Failed to wait for MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
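	/* The MC location registers pack a start/top address pair in 64KB
	 * units (hence the >> 16); per REG_SET(), FB_START/AGP_START and
	 * FB_TOP/AGP_TOP presumably occupy separate fields of one register. */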
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(R520_MC_FB_LOCATION, tmp);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	WREG32(0x310, rdev->mc.vram_location);
	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32_MC(R520_MC_AGP_LOCATION, tmp);
		WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
		WREG32_MC(R520_MC_AGP_BASE_2, 0);
	} else {
		WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32_MC(R520_MC_AGP_BASE, 0);
		WREG32_MC(R520_MC_AGP_BASE_2, 0);
	}

    dbgprintf("done: %s\n",__FUNCTION__);

	return 0;
}

void r520_mc_fini(struct radeon_device *rdev)
{
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


/*
 * Global GPU functions
 */
void r520_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

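/* Poll MC_STATUS until the idle bit reads back set, waiting 1us per
 * iteration; returns 0 once idle, -1 after rdev->usec_timeout tries. */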
int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(R520_MC_STATUS);
		if (tmp & R520_MC_STATUS_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r520_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;
    dbgprintf("%s\n\r",__FUNCTION__);

	r100_hdp_reset(rdev);
	rs600_disable_vga(rdev);
	/*
	 * DST_PIPE_CONFIG		0x170C
	 * GB_TILE_CONFIG		0x4018
	 * GB_FIFO_SIZE			0x4024
	 * GB_PIPE_SELECT		0x402C
	 * GB_PIPE_SELECT2              0x4124
	 *	Z_PIPE_SHIFT			0
	 *	Z_PIPE_MASK			0x000000003
	 * GB_FIFO_SIZE2                0x4128
	 *	SC_SFIFO_SIZE_SHIFT		0
	 *	SC_SFIFO_SIZE_MASK		0x000000003
	 *	SC_MFIFO_SIZE_SHIFT		2
	 *	SC_MFIFO_SIZE_MASK		0x00000000C
	 *	FG_SFIFO_SIZE_SHIFT		4
	 *	FG_SFIFO_SIZE_MASK		0x000000030
	 *	ZB_MFIFO_SIZE_SHIFT		6
	 *	ZB_MFIFO_SIZE_MASK		0x0000000C0
	 * GA_ENHANCE			0x4274
	 * SU_REG_DEST			0x42C8
	 */
	/* workaround for RV530 */
	if (rdev->family == CHIP_RV530) {
		WREG32(0x4124, 1);
		WREG32(0x4128, 0xFF);
	}
	r420_pipes_init(rdev);
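	/* Enable clocks only for the active pipes: one bit for the pipe
	 * currently selected in DST_PIPE_CONFIG (0x170C) plus the enable
	 * nibble from GB_PIPE_SELECT (0x402C), written to PLL register
	 * 0x000D (presumably per-pipe clock gating). */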
	gb_pipe_select = RREG32(0x402C);
	tmp = RREG32(0x170C);
	pipe_select_current = (tmp >> 2) & 3;
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r520_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}


/*
 * VRAM info
 */
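/* The effective memory bus width is decoded from MC_CNTL0: 32 bits times
 * 2^N memory channels (N from the MEM_NUM_CHANNELS field), doubled again
 * when MC_CHANNEL_SIZE reports wide channels. */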
static void r520_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;
    dbgprintf("%s\n\r",__FUNCTION__);

	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32_MC(R520_MC_CNTL0);
	switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
	case 0:
		rdev->mc.vram_width = 32;
		break;
	case 1:
		rdev->mc.vram_width = 64;
		break;
	case 2:
		rdev->mc.vram_width = 128;
		break;
	case 3:
		rdev->mc.vram_width = 256;
		break;
	default:
		rdev->mc.vram_width = 128;
		break;
	}
	if (tmp & R520_MC_CHANNEL_SIZE)
		rdev->mc.vram_width *= 2;
}

void r520_vram_info(struct radeon_device *rdev)
{
	r520_vram_get_type(rdev);
	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}

/*
 * Global GPU functions
 */
void rs600_disable_vga(struct radeon_device *rdev)
{
    unsigned tmp;
    dbgprintf("%s\n\r",__FUNCTION__);

    WREG32(0x330, 0);
    WREG32(0x338, 0);
    tmp = RREG32(0x300);
    tmp &= ~(3 << 16);
    WREG32(0x300, tmp);
    WREG32(0x308, (1 << 8));
    WREG32(0x310, rdev->mc.vram_location);
    WREG32(0x594, 0);
}


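/* Reads how many raster pipes the chip exposes from GB_PIPE_SELECT
 * (0x402C) and encodes that into GB_TILE_CONFIG (0x4018); the 0/3/6/7
 * values are presumably the 1/2/3/4-pipe tiling configurations. */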
void r420_pipes_init(struct radeon_device *rdev)
{
    unsigned tmp;
    unsigned gb_pipe_select;
    unsigned num_pipes;

    dbgprintf("%s\n\r",__FUNCTION__);

    /* GA_ENHANCE workaround TCL deadlock issue */
    WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
    /* get max number of pipes */
    gb_pipe_select = RREG32(0x402C);
    num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
    rdev->num_gb_pipes = num_pipes;
    tmp = 0;
    switch (num_pipes) {
    default:
        /* force to 1 pipe */
        num_pipes = 1;
    case 1:
        tmp = (0 << 1);
        break;
    case 2:
        tmp = (3 << 1);
        break;
    case 3:
        tmp = (6 << 1);
        break;
    case 4:
        tmp = (7 << 1);
        break;
    }
    WREG32(0x42C8, (1 << num_pipes) - 1);
    /* Sub-pixel precision of 1/12 so we can have 4K rendering according to the docs */
    tmp |= (1 << 4) | (1 << 0);
    WREG32(0x4018, tmp);
    if (r100_gui_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait for GUI idle while "
               "programming pipes. Bad things might happen.\n");
    }

    tmp = RREG32(0x170C);
    WREG32(0x170C, tmp | (1 << 31));

    WREG32(R300_RB2D_DSTCACHE_MODE,
           RREG32(R300_RB2D_DSTCACHE_MODE) |
           R300_DC_AUTOFLUSH_ENABLE |
           R300_DC_DC_DISABLE_IGNORE_PE);

    if (r100_gui_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait for GUI idle while "
               "programming pipes. Bad things might happen.\n");
    }
    DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
    uint32_t tmp;
    dbgprintf("%s\n\r",__FUNCTION__);

    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
    tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
    WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
    if (rdev->gart.table.vram.robj) {
//        radeon_object_kunmap(rdev->gart.table.vram.robj);
//        radeon_object_unpin(rdev->gart.table.vram.robj);
    }
}

void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
    if (rdev->gart.table.vram.robj == NULL) {
        return;
    }
//    radeon_object_kunmap(rdev->gart.table.vram.robj);
//    radeon_object_unpin(rdev->gart.table.vram.robj);
//    radeon_object_unref(&rdev->gart.table.vram.robj);
}

/*
 * Common gart functions.
 */
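/* GART offsets are in bytes: t indexes fixed 4KB GPU pages while p indexes
 * CPU pages, so when PAGE_SIZE > 4096 one CPU page spans several
 * consecutive GPU page-table entries. */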
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
            int pages)
{
    unsigned t;
    unsigned p;
    int i, j;
    dbgprintf("%s\n\r",__FUNCTION__);

    if (!rdev->gart.ready) {
        dbgprintf("trying to unbind memory from uninitialized GART !\n");
        return;
    }
    t = offset / 4096;
    p = t / (PAGE_SIZE / 4096);
    for (i = 0; i < pages; i++, p++) {
        if (rdev->gart.pages[p]) {
//            pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
//                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
            rdev->gart.pages[p] = NULL;
            rdev->gart.pages_addr[p] = 0;
            for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
                radeon_gart_set_page(rdev, t, 0);
            }
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
}



void radeon_gart_fini(struct radeon_device *rdev)
{
    if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
        /* unbind pages */
        radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
    }
    rdev->gart.ready = false;
//    kfree(rdev->gart.pages);
//    kfree(rdev->gart.pages_addr);
    rdev->gart.pages = NULL;
    rdev->gart.pages_addr = NULL;
}


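/* AGP bring-up: read the bridge capabilities, derive a default rate
 * (8x/4x on AGPv3, otherwise 4x/2x/1x), apply per-chipset quirks,
 * validate any user-requested radeon_agpmode, then enable AGP with
 * fast writes disabled. */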
int radeon_agp_init(struct radeon_device *rdev)
{

    dbgprintf("%s\n\r",__FUNCTION__);

#if __OS_HAS_AGP
    struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
    struct drm_agp_mode mode;
    struct drm_agp_info info;
    uint32_t agp_status;
    int default_mode;
    bool is_v3;
    int ret;

    /* Acquire AGP. */
    if (!rdev->ddev->agp->acquired) {
        ret = drm_agp_acquire(rdev->ddev);
        if (ret) {
            DRM_ERROR("Unable to acquire AGP: %d\n", ret);
            return ret;
        }
    }

    ret = drm_agp_info(rdev->ddev, &info);
    if (ret) {
        DRM_ERROR("Unable to get AGP info: %d\n", ret);
        return ret;
    }
    mode.mode = info.mode;
    agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
    is_v3 = !!(agp_status & RADEON_AGPv3_MODE);

    if (is_v3) {
        default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
    } else {
        if (agp_status & RADEON_AGP_4X_MODE) {
            default_mode = 4;
        } else if (agp_status & RADEON_AGP_2X_MODE) {
            default_mode = 2;
        } else {
            default_mode = 1;
        }
    }

    /* Apply AGPMode Quirks */
    while (p && p->chip_device != 0) {
        if (info.id_vendor == p->hostbridge_vendor &&
            info.id_device == p->hostbridge_device &&
            rdev->pdev->vendor == p->chip_vendor &&
            rdev->pdev->device == p->chip_device &&
            rdev->pdev->subsystem_vendor == p->subsys_vendor &&
            rdev->pdev->subsystem_device == p->subsys_device) {
            default_mode = p->default_mode;
        }
        ++p;
    }

    if (radeon_agpmode > 0) {
        if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
            (radeon_agpmode > (is_v3 ? 8 : 4)) ||
            (radeon_agpmode & (radeon_agpmode - 1))) {
            DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
                  radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
                  default_mode);
            radeon_agpmode = default_mode;
        } else {
            DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
        }
    } else {
        radeon_agpmode = default_mode;
    }

    mode.mode &= ~RADEON_AGP_MODE_MASK;
    if (is_v3) {
        switch (radeon_agpmode) {
        case 8:
            mode.mode |= RADEON_AGPv3_8X_MODE;
            break;
        case 4:
        default:
            mode.mode |= RADEON_AGPv3_4X_MODE;
            break;
        }
    } else {
        switch (radeon_agpmode) {
        case 4:
            mode.mode |= RADEON_AGP_4X_MODE;
            break;
        case 2:
            mode.mode |= RADEON_AGP_2X_MODE;
            break;
        case 1:
        default:
            mode.mode |= RADEON_AGP_1X_MODE;
            break;
        }
    }

    mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
    ret = drm_agp_enable(rdev->ddev, mode);
    if (ret) {
        DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
        return ret;
    }

    rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
    rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;

    /* workaround some hw issues */
    if (rdev->family < CHIP_R200) {
        WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
    }
    return 0;
#else
    return 0;
#endif
}


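/* Display clients (both VGA engines and both CRTCs) are switched off
 * before the MC aperture is moved; the trailing read of D2CRTC_CONTROL
 * makes sure the writes have landed before the 1ms settle delay. */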
void rs600_mc_disable_clients(struct radeon_device *rdev)
{
    unsigned tmp;
    dbgprintf("%s\n",__FUNCTION__);

    if (r100_gui_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait for GUI idle while "
               "programming pipes. Bad things might happen.\n");
    }

    tmp = RREG32(AVIVO_D1VGA_CONTROL);
    WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
    tmp = RREG32(AVIVO_D2VGA_CONTROL);
    WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);

    tmp = RREG32(AVIVO_D1CRTC_CONTROL);
    WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
    tmp = RREG32(AVIVO_D2CRTC_CONTROL);
    WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);

    /* make sure all previous writes got through */
    tmp = RREG32(AVIVO_D2CRTC_CONTROL);

    mdelay(1);

    dbgprintf("done\n");

}
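/* Each PCIE GART entry is 32 bits: the low address bits are stored shifted
 * right by 8 and the low nibble 0xC presumably sets the read/write enable
 * flags. Note the upper address byte is folded in with << 4 here, while
 * the mainline r300 code shifts it into the high bits; harmless on a
 * 32-bit KolibriOS host where upper_32_bits(addr) is always 0. */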
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
    void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

    if (i < 0 || i >= rdev->gart.num_gpu_pages) {
        return -EINVAL;
    }
    addr = (((u32_t)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
    writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
    return 0;
}


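/* GART bookkeeping is sized twice over: num_cpu_pages counts host pages of
 * PAGE_SIZE, num_gpu_pages counts fixed 4KB GPU pages; the caller later
 * sizes the table at one 32-bit entry per GPU page. */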
int radeon_gart_init(struct radeon_device *rdev)
{

    dbgprintf("%s\n",__FUNCTION__);

    if (rdev->gart.pages) {
        return 0;
    }
    /* We need PAGE_SIZE >= 4096 */
    if (PAGE_SIZE < 4096) {
        DRM_ERROR("Page size is smaller than GPU page size!\n");
        return -EINVAL;
    }
    /* Compute table size */
    rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
    rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
    DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
         rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
    /* Allocate pages table */
    rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
                   GFP_KERNEL);
    if (rdev->gart.pages == NULL) {
//        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    rdev->gart.pages_addr = kzalloc(sizeof(u32_t) *
                    rdev->gart.num_cpu_pages, GFP_KERNEL);
    if (rdev->gart.pages_addr == NULL) {
//        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    return 0;
}
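/* Port note: instead of allocating the table through the (commented-out)
 * radeon_object API, this port assumes a fixed GPU offset of 8MB for the
 * in-VRAM table and maps it for the CPU through the PCI aperture with
 * MapIoMem(). */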
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
    uint32_t gpu_addr;
    int r;

//    if (rdev->gart.table.vram.robj == NULL) {
//        r = radeon_object_create(rdev, NULL,
//                     rdev->gart.table_size,
//                     true,
//                     RADEON_GEM_DOMAIN_VRAM,
//                     false, &rdev->gart.table.vram.robj);
//        if (r) {
//            return r;
//        }
//    }
//    r = radeon_object_pin(rdev->gart.table.vram.robj,
//                  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
//    if (r) {
//        radeon_object_unref(&rdev->gart.table.vram.robj);
//        return r;
//    }
//    r = radeon_object_kmap(rdev->gart.table.vram.robj,
//                   (void **)&rdev->gart.table.vram.ptr);
//    if (r) {
//        radeon_object_unpin(rdev->gart.table.vram.robj);
//        radeon_object_unref(&rdev->gart.table.vram.robj);
//        DRM_ERROR("radeon: failed to map gart vram table.\n");
//        return r;
//    }

    gpu_addr = 0x800000;

    u32_t pci_addr = rdev->mc.aper_base + gpu_addr;

    rdev->gart.table.vram.ptr = (void*)MapIoMem(pci_addr, rdev->gart.table_size, PG_SW);

    rdev->gart.table_addr = gpu_addr;

    dbgprintf("alloc gart vram:\n  gpu_base %x pci_base %x lin_addr %x",
               gpu_addr, pci_addr, rdev->gart.table.vram.ptr);

    return 0;
}

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);

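/* GART bring-up order: build the CPU-side structures, place the table in
 * VRAM, fence the aperture (START/END plus discard of unmapped accesses),
 * point TX_GART_BASE at the table, then set the enable bit and flush the
 * TLB. */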
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
    uint32_t table_addr;
    uint32_t tmp;
    int r;

    dbgprintf("%s\n",__FUNCTION__);

    /* Initialize common gart structure */
    r = radeon_gart_init(rdev);
    if (r) {
        return r;
    }
 //   r = rv370_debugfs_pcie_gart_info_init(rdev);
 //   if (r) {
 //       DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
 //   }
    rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
    r = radeon_gart_table_vram_alloc(rdev);
    if (r) {
        return r;
    }
    /* discard memory requests outside of the configured range */
    tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
    WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
    WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
    tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
    WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
    WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
    WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
    table_addr = rdev->gart.table_addr;
    WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
    /* FIXME: setup default page */
    WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
    WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
    /* Clear error */
    WREG32_PCIE(0x18, 0);
    tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
    tmp |= RADEON_PCIE_TX_GART_EN;
    tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
    WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
    rv370_pcie_gart_tlb_flush(rdev);
    DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
         rdev->mc.gtt_size >> 20, table_addr);
    rdev->gart.ready = true;
    return 0;
}

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
    uint32_t tmp;
    int i;

    /* Workaround for a HW bug: flush twice */
    for (i = 0; i < 2; i++) {
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
        (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        mb();
    }
}

int r300_gart_enable(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
    if (rdev->flags & RADEON_IS_AGP) {
        if (rdev->family > CHIP_RV350) {
            rv370_pcie_gart_disable(rdev);
        } else {
            r100_pci_gart_disable(rdev);
        }
        return 0;
    }
#endif
    if (rdev->flags & RADEON_IS_PCIE) {
        rdev->asic->gart_disable = &rv370_pcie_gart_disable;
        rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
        rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
        return rv370_pcie_gart_enable(rdev);
    }
 //   return r100_pci_gart_enable(rdev);
    return -EINVAL; /* the PCI GART path is not wired up in this port */
}



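/* Fence setup in this port is cut down to the essentials: claim one
 * scratch register for the fence sequence value and zero it; the lock,
 * wait queue and list initialisation stay commented out. */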
int radeon_fence_driver_init(struct radeon_device *rdev)
{
    unsigned long irq_flags;
    int r;

//    write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
    r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
    if (r) {
        DRM_ERROR("Fence failed to get a scratch register.");
//        write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
        return r;
    }
    WREG32(rdev->fence_drv.scratch_reg, 0);
//    atomic_set(&rdev->fence_drv.seq, 0);
//    INIT_LIST_HEAD(&rdev->fence_drv.created);
//    INIT_LIST_HEAD(&rdev->fence_drv.emited);
//    INIT_LIST_HEAD(&rdev->fence_drv.signaled);
    rdev->fence_drv.count_timeout = 0;
//    init_waitqueue_head(&rdev->fence_drv.queue);
//    write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
//    if (radeon_debugfs_fence_init(rdev)) {
//        DRM_ERROR("Failed to register debugfs file for fence !\n");
//    }
    return 0;
}


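/* Unlike the mainline driver, pagelist here already holds physical
 * addresses, so each entry is masked to a 4KB boundary and written
 * straight into the table instead of going through pci_map_page(). */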
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
             int pages, u32_t *pagelist)
{
    unsigned t;
    unsigned p;
    uint64_t page_base;
    int i, j;

    dbgprintf("%s\n\r",__FUNCTION__);


    if (!rdev->gart.ready) {
        DRM_ERROR("trying to bind memory to uninitialized GART !\n");
        return -EINVAL;
    }
    t = offset / 4096;
    p = t / (PAGE_SIZE / 4096);

    for (i = 0; i < pages; i++, p++) {
        /* we need to support large memory configurations */
        /* assume that unbind has already been called on the range */

        rdev->gart.pages_addr[p] = pagelist[i] & ~4095;

        //if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
        //    /* FIXME: failed to map page (return -ENOMEM?) */
        //    radeon_gart_unbind(rdev, offset, pages);
        //    return -ENOMEM;
        //}
        rdev->gart.pages[p] = pagelist[i];
        page_base = (uint32_t)rdev->gart.pages_addr[p];
        for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
            radeon_gart_set_page(rdev, t, page_base);
            page_base += 4096;
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);

    dbgprintf("done %s\n",__FUNCTION__);

    return 0;
}