Subversion Repositories Kolibri OS

Rev

Rev 2332 | Rev 2344 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2325 Serge 1
/*
2
 * Intel GTT (Graphics Translation Table) routines
3
 *
4
 * Caveat: This driver implements the linux agp interface, but this is far from
5
 * a agp driver! GTT support ended up here for purely historical reasons: The
6
 * old userspace intel graphics drivers needed an interface to map memory into
7
 * the GTT. And the drm provides a default interface for graphic devices sitting
8
 * on an agp port. So it made sense to fake the GTT support as an agp port to
9
 * avoid having to create a new api.
10
 *
11
 * With gem this does not make much sense anymore, just needlessly complicates
12
 * the code. But as long as the old graphics stack is still support, it's stuck
13
 * here.
14
 *
15
 * /fairy-tale-mode off
16
 */
17
 
18
#include 
19
#include 
20
#include 
21
#include 
22
//#include 
23
//#include 
24
//#include 
25
#include 
26
#include "agp.h"
27
#include "intel-agp.h"
28
#include "intel-gtt.h"
29
 
30
#include 
31
 
32
struct pci_dev *
33
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
34
 
35
static bool intel_enable_gtt(void);
36
 
37
 
38
#define PCI_VENDOR_ID_INTEL             0x8086
39
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
40
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
2339 Serge 41
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
42
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
43
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
44
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2
2325 Serge 45
 
46
 
47
#define AGP_NORMAL_MEMORY 0
48
 
49
#define AGP_USER_TYPES (1 << 16)
50
#define AGP_USER_MEMORY (AGP_USER_TYPES)
51
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
52
 
53
 
54
static inline int pci_read_config_word(struct pci_dev *dev, int where,
55
                    u16 *val)
56
{
57
    *val = PciRead16(dev->busnr, dev->devfn, where);
58
    return 1;
59
}
60
 
61
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
62
                    u32 *val)
63
{
64
    *val = PciRead32(dev->busnr, dev->devfn, where);
65
    return 1;
66
}
67
 
68
static inline int pci_write_config_word(struct pci_dev *dev, int where,
69
                    u16 val)
70
{
71
    PciWrite16(dev->busnr, dev->devfn, where, val);
72
    return 1;
73
}
74
 
75
/*
76
 * If we have Intel graphics, we're not going to have anything other than
77
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
2339 Serge 78
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
2325 Serge 79
 * Only newer chipsets need to bother with this, of course.
80
 */
2339 Serge 81
#ifdef CONFIG_INTEL_IOMMU
2325 Serge 82
#define USE_PCI_DMA_API 1
83
#else
84
#define USE_PCI_DMA_API 0
85
#endif
86
 
87
/* Per-chipset-generation operations table; one static instance exists
 * per supported GMCH family (i915, g33, i965, g4x, ironlake, gen6...). */
struct intel_gtt_driver {
    unsigned int gen : 8;              /* graphics core generation number */
    unsigned int is_g33 : 1;
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1; /* PGETBL_CTL carries an enable bit */
    unsigned int dma_mask_size : 8;    /* DMA address width, in bits */
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    void (*chipset_flush)(void);
};
106
 
107
/* Driver-global state. A single GMCH per system is assumed, so this is
 * one static instance rather than per-device data. */
static struct _intel_private {
    struct intel_gtt base;              /* description exposed via intel_gtt_get() */
    const struct intel_gtt_driver *driver; /* matched per-generation ops table */
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;         /* host bridge (GMCH) PCI device */
    u8 __iomem *registers;              /* mapped MMIO register file */
    phys_addr_t gtt_bus_addr;           /* bus address of the GTT table itself */
    phys_addr_t gma_bus_addr;           /* graphics aperture base (GMADR BAR) */
    u32 PGETBL_save;                    /* PGETBL_CTL value restored when enabling */
    u32 __iomem *gtt;       /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;      /* mapped chipset flush page, may be NULL */
    char *i81x_gtt_table;
    struct resource ifp_resource;       /* flush-page resource (unused on KolibriOS) */
    int resource_valid;
    struct page *scratch_page;
    dma_addr_t scratch_page_dma;        /* backing page mapped into unused GTT slots */
} intel_private;
126
 
127
#define INTEL_GTT_GEN   intel_private.driver->gen
128
#define IS_G33          intel_private.driver->is_g33
129
#define IS_PINEVIEW     intel_private.driver->is_pineview
130
#define IS_IRONLAKE     intel_private.driver->is_ironlake
131
#define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable
132
 
133
static int intel_gtt_setup_scratch_page(void)
134
{
135
    addr_t page;
136
 
137
    page = AllocPage();
138
    if (page == 0)
139
        return -ENOMEM;
140
 
141
    intel_private.scratch_page_dma = page;
142
    intel_private.scratch_page = NULL;
143
 
144
    return 0;
145
}
146
 
147
/* Determine how much main memory the BIOS "stole" for graphics, by
 * decoding the GMCH control word (or, on SandyBridge, the new control
 * register at 0x50). Returns the size in bytes, 0 if none/unknown. */
static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;  /* set when memory is local (soldered) rather than stolen */
    static const int ddt[4] = { 0, 16, 32, 64 };  /* RDRAM device density table, MB */
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctrl);

    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        /* i830/i845: old-style GMS field, possibly local RDRAM */
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            /* local memory: size computed from the RDRAM channel type reg */
            rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else if (INTEL_GTT_GEN == 6) {
        /*
         * SandyBridge has new memory control reg at 0x50.w
         */
        u16 snb_gmch_ctl;
        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
        case SNB_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case SNB_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case SNB_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case SNB_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case SNB_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case SNB_GMCH_GMS_STOLEN_192M:
            stolen_size = MB(192);
            break;
        case SNB_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case SNB_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case SNB_GMCH_GMS_STOLEN_288M:
            stolen_size = MB(288);
            break;
        case SNB_GMCH_GMS_STOLEN_320M:
            stolen_size = MB(320);
            break;
        case SNB_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        case SNB_GMCH_GMS_STOLEN_384M:
            stolen_size = MB(384);
            break;
        case SNB_GMCH_GMS_STOLEN_416M:
            stolen_size = MB(416);
            break;
        case SNB_GMCH_GMS_STOLEN_448M:
            stolen_size = MB(448);
            break;
        case SNB_GMCH_GMS_STOLEN_480M:
            stolen_size = MB(480);
            break;
        case SNB_GMCH_GMS_STOLEN_512M:
            stolen_size = MB(512);
            break;
        }
    } else {
        /* i855 through ironlake: shared GMS encoding */
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
               stolen_size / KB(1), local ? "local" : "stolen");
    } else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}
297
 
298
/* Reprogram the GGTT page-table size field in PGETBL_CTL (i965+).
 * PPGTT must be disabled first via PGETBL_CTL2, otherwise the size
 * change would be applied while the per-process table is live. */
static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
    u32 pgetbl_ctl, pgetbl_ctl2;

    /* ensure that ppgtt is disabled */
    pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
    pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
    writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

    /* write the new ggtt size */
    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
    pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
    pgetbl_ctl |= size_flag;
    writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
313
 
314
/* Number of entries in the global GTT on i965-class hardware, derived
 * from the PGETBL_CTL size field. On gen5 (G4x/ironlake) the GMCH control
 * word may first require resizing the page table via PGETBL_CTL.
 * Returns size in entries (bytes / 4, one dword PTE per page). */
static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        /* gen5 encodes the wanted GTT size in the GMCH control word;
         * program PGETBL_CTL to match before reading it back below. */
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    /* 4 bytes per PTE */
    return size/4;
}
369
 
370
/* Total number of GTT entries for the detected chipset generation.
 * Gen3(g33)/4/5 read it from PGETBL_CTL, gen6 from the SNB GMCH control
 * register; older parts have exactly one entry per mappable page. */
static unsigned int intel_gtt_total_entries(void)
{
    int size;

    if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
        return i965_gtt_total_entries();
    else if (INTEL_GTT_GEN == 6) {
        u16 snb_gmch_ctl;

        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
        default:
        case SNB_GTT_SIZE_0M:
            printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
            size = MB(0);
            break;
        case SNB_GTT_SIZE_1M:
            size = MB(1);
            break;
        case SNB_GTT_SIZE_2M:
            size = MB(2);
            break;
        }
        /* 4 bytes per PTE */
        return size/4;
    } else {
        /* On previous hardware, the GTT size was just what was
         * required to map the aperture.
         */
        return intel_private.base.gtt_mappable_entries;
    }
}
401
 
402
/* Number of GTT entries that are CPU-mappable through the aperture,
 * i.e. aperture size in pages. Gen1/2 decode a config register; 9xx
 * and later simply report the aperture BAR length. */
static unsigned int intel_gtt_mappable_entries(void)
{
    unsigned int aperture_size;

    if (INTEL_GTT_GEN == 1) {
        u32 smram_miscc;

        pci_read_config_dword(intel_private.bridge_dev,
                      I810_SMRAM_MISCC, &smram_miscc);

        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                == I810_GFX_MEM_WIN_32M)
            aperture_size = MB(32);
        else
            aperture_size = MB(64);
    } else if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);

        if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
            aperture_size = MB(64);
        else
            aperture_size = MB(128);
    } else {
        /* 9xx supports large sizes, just look at the length */
        aperture_size = pci_resource_len(intel_private.pcidev, 2);
    }

    return aperture_size >> PAGE_SHIFT;
}
434
 
435
/* Release the scratch page. Deliberately a no-op on KolibriOS: the page
 * is leaked (FreePage call disabled), since teardown only happens at
 * driver shutdown. */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
439
 
440
/* Undo intel_gtt_init(): chipset-specific cleanup first, then unmap the
 * GTT and MMIO register windows, then drop the scratch page. */
static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

    FreeKernelSpace(intel_private.gtt);
    FreeKernelSpace(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}
449
 
450
/* Common GTT bring-up: run the chipset ->setup() hook, size the GTT,
 * map the translation table, allocate the scratch page and enable
 * translation. Returns 0 on success or a negative errno. */
static int intel_gtt_init(void)
{
    u32 gtt_map_size;
    int ret;

    ENTER();

    ret = intel_private.driver->setup();
    if (ret != 0)
    {
        LEAVE();
        return ret;
    };


    intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
    intel_private.base.gtt_total_entries = intel_gtt_total_entries();

    /* save the PGETBL reg for resume */
    intel_private.PGETBL_save =
        readl(intel_private.registers+I810_PGETBL_CTL)
            & ~I810_PGETBL_ENABLED;
    /* we only ever restore the register when enabling the PGTBL... */
    if (HAS_PGTBL_EN)
        intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
			"detected gtt size: %dK total, %dK mappable\n",
            intel_private.base.gtt_total_entries * 4,
            intel_private.base.gtt_mappable_entries * 4);

    /* one dword PTE per page */
    gtt_map_size = intel_private.base.gtt_total_entries * 4;

    /* map the GTT uncached so PTE writes are not held in CPU caches */
    intel_private.gtt = (u32*)MapIoMem(intel_private.gtt_bus_addr,
                    gtt_map_size, PG_SW+PG_NOCACHE);
    if (!intel_private.gtt) {
        intel_private.driver->cleanup();
        FreeKernelSpace(intel_private.registers);
        /* NOTE(review): LEAVE() is skipped on this error path, unlike
         * the ->setup() failure path above — confirm intentional. */
        return -ENOMEM;
    }

    /* write back and invalidate all CPU caches before the GPU starts
     * walking the freshly mapped table */
    asm volatile("wbinvd");

    intel_private.base.stolen_size = intel_gtt_stolen_size();

    intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

    ret = intel_gtt_setup_scratch_page();
    if (ret != 0) {
        intel_gtt_cleanup();
        return ret;
    }

    intel_enable_gtt();

    LEAVE();

    return 0;
}
509
 
2339 Serge 510
/* Write one i830-format PTE: page address OR'd with the valid bit, plus
 * the snooped-cache bit when the caller asked for cached memory. */
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte |= I830_PTE_SYSTEM_CACHED;

	writel(addr | pte, intel_private.gtt + entry);
}
520
 
2325 Serge 521
/* Enable GTT translation: record the aperture bus address, set the GMCH
 * enable bit (gen2), then restore the saved PGETBL_CTL value. The write
 * buffers are flushed around the PGETBL update on gen3+ because this
 * also runs on the resume path. Returns true on success. */
static bool intel_enable_gtt(void)
{
    u32 gma_addr;
    u8 __iomem *reg;

    /* the aperture BAR moved from I810_GMADDR to I915_GMADDR on gen3+ */
    if (INTEL_GTT_GEN <= 2)
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
                      &gma_addr);
    else
        pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                      &gma_addr);

    intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

    /* gen6+ has no PGETBL/GMCH enable dance; translation is always on */
    if (INTEL_GTT_GEN >= 6)
        return true;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                      I830_GMCH_CTRL, gmch_ctrl);

        /* read back to verify the enable bit stuck */
        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
                gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    reg = intel_private.registers+I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
            readl(reg), intel_private.PGETBL_save);
        return false;
    }

    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    return true;
}
577
 
2339 Serge 578
static bool i830_check_flags(unsigned int flags)
579
{
580
	switch (flags) {
581
	case 0:
582
	case AGP_PHYS_MEMORY:
583
	case AGP_USER_CACHED_MEMORY:
584
	case AGP_USER_MEMORY:
585
		return true;
586
	}
2325 Serge 587
 
2339 Serge 588
	return false;
589
}
590
 
2332 Serge 591
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
592
                struct page **pages, unsigned int flags)
593
{
594
    int i, j;
2325 Serge 595
 
2332 Serge 596
    for (i = 0, j = first_entry; i < num_entries; i++, j++) {
597
        dma_addr_t addr = (dma_addr_t)(pages[i]);
598
        intel_private.driver->write_entry(addr,
599
                          j, flags);
600
    }
601
    readl(intel_private.gtt+j-1);
602
}
603
 
604
 
605
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
606
{
607
	unsigned int i;
608
 
609
	for (i = first_entry; i < (first_entry + num_entries); i++) {
610
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
611
						  i, 0);
612
	}
613
	readl(intel_private.gtt+i-1);
614
}
615
 
2325 Serge 616
/* Set up the i9xx chipset flush page. On KolibriOS the resource
 * allocation and mapping are stubbed out (commented), so
 * i9xx_flush_page stays NULL and the error below always fires;
 * chipset flushing is effectively disabled. */
static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    /* gen6 does not need a flush page */
    if (INTEL_GTT_GEN == 6)
        return;

    /* setup a resource for this object */
//    intel_private.ifp_resource.name = "Intel Flush Page";
//    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    intel_private.resource_valid = 0;

    /* Setup chipset flush for 915 */
//    if (IS_G33 || INTEL_GTT_GEN >= 4) {
//        intel_i965_g33_setup_chipset_flush();
//    } else {
//        intel_i915_setup_chipset_flush();
//    }

//    if (intel_private.ifp_resource.start)
//        intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
            "can't ioremap flush page - no chipset flushing\n");
}
2325 Serge 644
 
2339 Serge 645
static void i9xx_cleanup(void)
646
{
647
	if (intel_private.i9xx_flush_page)
648
		iounmap(intel_private.i9xx_flush_page);
649
//	if (intel_private.resource_valid)
650
//		release_resource(&intel_private.ifp_resource);
651
	intel_private.ifp_resource.start = 0;
652
	intel_private.resource_valid = 0;
2325 Serge 653
}
654
 
655
static void i9xx_chipset_flush(void)
656
{
657
    if (intel_private.i9xx_flush_page)
658
        writel(1, intel_private.i9xx_flush_page);
659
}
660
 
2339 Serge 661
/* Write one i965-format PTE. Physical address bits 35:32 are folded
 * into PTE bits 7:4 ("Shift high bits down"), and the snooped-cache
 * bit is set when cached memory was requested. */
static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte |= I830_PTE_SYSTEM_CACHED;

	addr |= (addr >> 28) & 0xf0;
	writel(addr | pte, intel_private.gtt + entry);
}
675
 
2325 Serge 676
/* Gen6 PTEs can encode every agp flag combination, so validation
 * always succeeds. */
static bool gen6_check_flags(unsigned int flags)
{
    (void)flags;  /* unreferenced by design */
    return true;
}
680
 
681
/* Write one gen6 (SandyBridge) PTE. The flags select the cacheability
 * level (uncached / LLC+MLC / LLC) and optionally the GFDT bit; the
 * GFDT request is honoured only for the cacheable types. */
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
                 unsigned int flags)
{
    unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
    unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
    u32 pte_flags = I810_PTE_VALID;

    if (type_mask == AGP_USER_MEMORY) {
        pte_flags |= GEN6_PTE_UNCACHED;
    } else {
        if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
            pte_flags |= GEN6_PTE_LLC_MLC;
        else /* 'normal'/'cached' default to LLC */
            pte_flags |= GEN6_PTE_LLC;
        if (gfdt)
            pte_flags |= GEN6_PTE_GFDT;
    }

    /* gen6 has bit11-4 for physical addr bit39-32 */
    addr |= (addr >> 28) & 0xff0;
    writel(addr | pte_flags, intel_private.gtt + entry);
}
704
 
705
/* Gen6 keeps no flush page or extra resources, so teardown is a no-op. */
static void gen6_cleanup(void)
{
}
708
 
2339 Serge 709
/* Certain Gen5 chipsets require require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 *
 * Returns 1 when the Ironlake mobile + active-IOMMU workaround is
 * needed; always 0 when built without CONFIG_INTEL_IOMMU.
 */
static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;
	extern int intel_iommu_gfx_mapped;

	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
	     intel_iommu_gfx_mapped)
		return 1;
#endif
	return 0;
}
728
 
2325 Serge 729
static int i9xx_setup(void)
730
{
731
    u32 reg_addr;
732
 
733
    pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr);
734
 
735
    reg_addr &= 0xfff80000;
736
 
737
    intel_private.registers = (u8*)MapIoMem(reg_addr, 128 * 4096, PG_SW+PG_NOCACHE);
738
 
739
    if (!intel_private.registers)
740
        return -ENOMEM;
741
 
742
    if (INTEL_GTT_GEN == 3) {
743
        u32 gtt_addr;
744
 
745
        pci_read_config_dword(intel_private.pcidev,
746
                      I915_PTEADDR, >t_addr);
747
        intel_private.gtt_bus_addr = gtt_addr;
748
    } else {
749
        u32 gtt_offset;
750
 
751
        switch (INTEL_GTT_GEN) {
752
        case 5:
753
        case 6:
754
            gtt_offset = MB(2);
755
            break;
756
        case 4:
757
        default:
758
            gtt_offset =  KB(512);
759
            break;
760
        }
761
        intel_private.gtt_bus_addr = reg_addr + gtt_offset;
762
    }
763
 
2339 Serge 764
	if (needs_idle_maps())
765
		intel_private.base.do_idle_maps = 1;
766
 
2325 Serge 767
    intel_i9xx_setup_flush();
768
 
769
    return 0;
770
}
771
 
2339 Serge 772
/* Per-generation driver vtables. Each bundles the generation number,
 * setup/cleanup hooks, PTE encoder, DMA mask width, flag validator and
 * chipset flush routine for one GMCH family. */
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
    .gen = 6,
    .setup = i9xx_setup,
    .cleanup = gen6_cleanup,
    .write_entry = gen6_write_entry,
    .dma_mask_size = 40,
    .check_flags = gen6_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
841
 
842
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 * Matched in order by display-device PCI id in intel_gmch_probe(). */
static const struct intel_gtt_driver_description {
    unsigned int gmch_chip_id;   /* PCI device id of the IGD function */
    char *name;                  /* human-readable chipset name */
    const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
        "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
    { 0, NULL, NULL }  /* terminator */
};
929
 
930
/* Locate the Intel display device with the given PCI id and record it
 * in intel_private.pcidev. The graphics device is always PCI function 0;
 * if the first match is another function, continue the search after it.
 * Returns 1 when found, 0 otherwise. */
static int find_gmch(u16 device)
{
    struct pci_dev *gmch_device =
        pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);

    if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0)
        gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
                         device, gmch_device);

    if (gmch_device == NULL)
        return 0;

    intel_private.pcidev = gmch_device;
    return 1;
}
946
 
947
/* Probe entry point: match the display device against the chipset table,
 * hook the fake-agp bridge up to our private state, and initialize the
 * GTT. Returns 1 on success, 0 if no supported chipset was found or
 * GTT init failed. */
int intel_gmch_probe(struct pci_dev *pdev,
                      struct agp_bridge_data *bridge)
{
    int i, mask;
    intel_private.driver = NULL;

    /* first matching device id wins; find_gmch() also fills in pcidev */
    for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
        if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
            intel_private.driver =
                intel_gtt_chipsets[i].gtt_driver;
            break;
        }
    }

    if (!intel_private.driver)
        return 0;

 //   bridge->driver = &intel_fake_agp_driver;
    bridge->dev_private_data = &intel_private;
    bridge->dev = pdev;

    intel_private.bridge_dev = pdev;

    dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);

    /* DMA mask configuration is stubbed out on KolibriOS; 'mask' is
     * currently only read, never applied */
    mask = intel_private.driver->dma_mask_size;
//    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//        dev_err(&intel_private.pcidev->dev,
//            "set gfx device dma mask %d-bit failed!\n", mask);
//    else
//        pci_set_consistent_dma_mask(intel_private.pcidev,
//                        DMA_BIT_MASK(mask));

    /*if (bridge->driver == &intel_810_driver)
        return 1;*/

    if (intel_gtt_init() != 0)
        return 0;

    return 1;
}
2339 Serge 988
EXPORT_SYMBOL(intel_gmch_probe);
2325 Serge 989
 
2326 Serge 990
const struct intel_gtt *intel_gtt_get(void)
991
{
992
    return &intel_private.base;
993
}
2339 Serge 994
EXPORT_SYMBOL(intel_gtt_get);
2326 Serge 995
 
2332 Serge 996
void intel_gtt_chipset_flush(void)
997
{
998
	if (intel_private.driver->chipset_flush)
999
		intel_private.driver->chipset_flush();
1000
}
2339 Serge 1001
EXPORT_SYMBOL(intel_gtt_chipset_flush);
2327 Serge 1002
 
2332 Serge 1003
 
2327 Serge 1004
phys_addr_t get_bus_addr(void)
1005
{
1006
    return intel_private.gma_bus_addr;
1007
};