Subversion Repositories Kolibri OS

Rev

Rev 2326 | Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
2325 Serge 1
/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */
17
 
18
#include 
19
#include 
20
#include 
21
#include 
22
//#include 
23
//#include 
24
//#include 
25
#include 
26
#include "agp.h"
27
#include "intel-agp.h"
28
#include "intel-gtt.h"
29
 
30
#include 
31
 
32
struct pci_dev *
33
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
34
 
35
static bool intel_enable_gtt(void);
36
 
37
 
38
#define PG_SW       0x003
39
#define PG_NOCACHE  0x018
40
 
41
#define PCI_VENDOR_ID_INTEL             0x8086
42
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
43
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
44
 
45
 
46
#define AGP_NORMAL_MEMORY 0
47
 
48
#define AGP_USER_TYPES (1 << 16)
49
#define AGP_USER_MEMORY (AGP_USER_TYPES)
50
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
51
 
52
/* Read one byte from a memory-mapped I/O address. */
static inline uint8_t __raw_readb(const volatile void __iomem *addr)
{
    const volatile uint8_t __force *p = addr;
    return *p;
}
56
 
57
/* Read one 16-bit word from a memory-mapped I/O address. */
static inline uint16_t __raw_readw(const volatile void __iomem *addr)
{
    const volatile uint16_t __force *p = addr;
    return *p;
}
61
 
62
/* Read one 32-bit word from a memory-mapped I/O address. */
static inline uint32_t __raw_readl(const volatile void __iomem *addr)
{
    const volatile uint32_t __force *p = addr;
    return *p;
}
66
 
67
#define readb __raw_readb
68
#define readw __raw_readw
69
#define readl __raw_readl
70
 
71
 
72
/* Write one byte to a memory-mapped I/O address. */
static inline void __raw_writeb(uint8_t b, volatile void __iomem *addr)
{
    volatile uint8_t __force *p = addr;
    *p = b;
}
74
 
75
/* Write one 16-bit word to a memory-mapped I/O address. */
static inline void __raw_writew(uint16_t b, volatile void __iomem *addr)
{
    volatile uint16_t __force *p = addr;
    *p = b;
}
77
 
78
/* Write one 32-bit word to a memory-mapped I/O address. */
static inline void __raw_writel(uint32_t b, volatile void __iomem *addr)
{
    volatile uint32_t __force *p = addr;
    *p = b;
}
80
 
81
/* Write one 64-bit word to a memory-mapped I/O address. */
static inline void __raw_writeq(__u64 b, volatile void __iomem *addr)
{
    volatile __u64 *p = addr;
    *p = b;
}
83
 
84
#define writeb __raw_writeb
85
#define writew __raw_writew
86
#define writel __raw_writel
87
#define writeq __raw_writeq
88
 
89
/*
 * Read a 16-bit value from the PCI configuration space of @dev at
 * byte offset @where. Thin wrapper over the KolibriOS PciRead16 service.
 * Always returns 1; callers in this file ignore the return value.
 */
static inline int pci_read_config_word(struct pci_dev *dev, int where,
                    u16 *val)
{
    *val = PciRead16(dev->busnr, dev->devfn, where);
    return 1;
}
95
 
96
/*
 * Read a 32-bit value from the PCI configuration space of @dev at
 * byte offset @where. Thin wrapper over the KolibriOS PciRead32 service.
 * Always returns 1; callers in this file ignore the return value.
 */
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
                    u32 *val)
{
    *val = PciRead32(dev->busnr, dev->devfn, where);
    return 1;
}
102
 
103
/*
 * Write a 16-bit value to the PCI configuration space of @dev at
 * byte offset @where. Thin wrapper over the KolibriOS PciWrite16 service.
 * Always returns 1; callers in this file ignore the return value.
 */
static inline int pci_write_config_word(struct pci_dev *dev, int where,
                    u16 val)
{
    PciWrite16(dev->busnr, dev->devfn, where, val);
    return 1;
}
109
 
110
/*
111
 * If we have Intel graphics, we're not going to have anything other than
112
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
113
 * on the Intel IOMMU support (CONFIG_DMAR).
114
 * Only newer chipsets need to bother with this, of course.
115
 */
116
#ifdef CONFIG_DMAR
117
#define USE_PCI_DMA_API 1
118
#else
119
#define USE_PCI_DMA_API 0
120
#endif
121
 
122
/*
 * Per-chipset-generation driver vtable: one static instance describes
 * each supported GMCH family (only Sandybridge in this file).
 */
struct intel_gtt_driver {
    unsigned int gen : 8;               /* chipset generation number */
    unsigned int is_g33 : 1;
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1;  /* PGETBL_CTL has an enable bit */
    unsigned int dma_mask_size : 8;     /* DMA addressing width in bits */
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    void (*chipset_flush)(void);
};
141
 
142
/*
 * Global driver state. There is exactly one GMCH per system, so a single
 * file-scope instance is used instead of per-device allocation.
 */
static struct _intel_private {
    struct intel_gtt base;              /* generic info exported to callers */
    const struct intel_gtt_driver *driver;  /* chipset vtable (see above) */
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;         /* host bridge / GMCH PCI device */
    u8 __iomem *registers;              /* mapped MMIO register window */
    phys_addr_t gtt_bus_addr;           /* bus address of the GTT itself */
    phys_addr_t gma_bus_addr;           /* bus address of the aperture */
    u32 PGETBL_save;                    /* PGETBL_CTL value to restore on enable */
    u32 __iomem *gtt;       /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;      /* mapped chipset flush page, may be NULL */
    char *i81x_gtt_table;
    struct resource ifp_resource;       /* flush page resource (unused here) */
    int resource_valid;
    struct page *scratch_page;
    dma_addr_t scratch_page_dma;        /* bus address of the scratch page */
} intel_private;
161
 
162
#define INTEL_GTT_GEN   intel_private.driver->gen
163
#define IS_G33          intel_private.driver->is_g33
164
#define IS_PINEVIEW     intel_private.driver->is_pineview
165
#define IS_IRONLAKE     intel_private.driver->is_ironlake
166
#define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable
167
 
168
/*
 * Allocate a single page to serve as the GTT scratch page (the page
 * that unused GTT entries can point at).
 * Returns 0 on success, -ENOMEM if no page could be allocated.
 */
static int intel_gtt_setup_scratch_page(void)
{
    addr_t page;

    page = AllocPage();
    if (page == 0)
        return -ENOMEM;

    /* AllocPage() result is used directly as the DMA address —
     * presumably it returns a physical address (TODO confirm). */
    intel_private.scratch_page_dma = page;
    intel_private.scratch_page = NULL;

    return 0;
}
181
 
182
/*
 * Decode the amount of stolen (BIOS pre-allocated) graphics memory from
 * the GMCH control registers. Three decode schemes are handled:
 * 82830/82845G, Sandybridge (gen 6, SNB_GMCH_CTRL at config offset 0x50),
 * and everything else via the I855 GMS field.
 * Returns the stolen size in bytes, 0 if none (or on i81x).
 */
static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;
    static const int ddt[4] = { 0, 16, 32, 64 };
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctrl);

    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            /* dedicated local (RDRAM) memory: size derived from the
             * RDRAM channel type register */
            rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else if (INTEL_GTT_GEN == 6) {
        /*
         * SandyBridge has new memory control reg at 0x50.w
         */
        u16 snb_gmch_ctl;
        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
        case SNB_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case SNB_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case SNB_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case SNB_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case SNB_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case SNB_GMCH_GMS_STOLEN_192M:
            stolen_size = MB(192);
            break;
        case SNB_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case SNB_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case SNB_GMCH_GMS_STOLEN_288M:
            stolen_size = MB(288);
            break;
        case SNB_GMCH_GMS_STOLEN_320M:
            stolen_size = MB(320);
            break;
        case SNB_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        case SNB_GMCH_GMS_STOLEN_384M:
            stolen_size = MB(384);
            break;
        case SNB_GMCH_GMS_STOLEN_416M:
            stolen_size = MB(416);
            break;
        case SNB_GMCH_GMS_STOLEN_448M:
            stolen_size = MB(448);
            break;
        case SNB_GMCH_GMS_STOLEN_480M:
            stolen_size = MB(480);
            break;
        case SNB_GMCH_GMS_STOLEN_512M:
            stolen_size = MB(512);
            break;
        }
    } else {
        /* i855 and later (pre-gen6): GMS field of I830_GMCH_CTRL */
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
        dbgprintf("detected %dK %s memory\n",
               stolen_size / KB(1), local ? "local" : "stolen");
    } else {
        dbgprintf("no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}
331
 
332
static void i965_adjust_pgetbl_size(unsigned int size_flag)
333
{
334
    u32 pgetbl_ctl, pgetbl_ctl2;
335
 
336
    /* ensure that ppgtt is disabled */
337
    pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
338
    pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
339
    writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
340
 
341
    /* write the new ggtt size */
342
    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
343
    pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
344
    pgetbl_ctl |= size_flag;
345
    writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
346
}
347
 
348
/*
 * Read the total number of GTT entries on gen4/gen5 (i965-class) hardware.
 * On gen5 the PGETBL size field is first (re)programmed from the GMCH
 * control register, then the size is decoded from PGETBL_CTL.
 * Returns the entry count (table size in bytes / 4 bytes per PTE).
 */
static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        /* gen5: GMCH reports the size; mirror it into PGETBL_CTL */
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
        dbgprintf("unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    /* 4 bytes per PTE */
    return size/4;
}
402
 
403
/*
 * Return the total number of GTT entries for the detected chipset.
 * Gen4/5/G33 decode from PGETBL_CTL, gen6 from the SNB GMCH control
 * word; older parts simply use one entry per mappable aperture page.
 */
static unsigned int intel_gtt_total_entries(void)
{
    int size;

    if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
        return i965_gtt_total_entries();
    else if (INTEL_GTT_GEN == 6) {
        u16 snb_gmch_ctl;

        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
        default:
        case SNB_GTT_SIZE_0M:
            printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
            size = MB(0);
            break;
        case SNB_GTT_SIZE_1M:
            size = MB(1);
            break;
        case SNB_GTT_SIZE_2M:
            size = MB(2);
            break;
        }
        /* 4 bytes per PTE */
        return size/4;
    } else {
        /* On previous hardware, the GTT size was just what was
         * required to map the aperture.
         */
        return intel_private.base.gtt_mappable_entries;
    }
}
434
 
435
/*
 * Return the number of GTT entries covering the CPU-mappable aperture.
 * Gen1/gen2 decode the aperture size from chipset registers; gen3+
 * read it straight from the length of PCI BAR 2.
 */
static unsigned int intel_gtt_mappable_entries(void)
{
    unsigned int aperture_size;

    if (INTEL_GTT_GEN == 1) {
        u32 smram_miscc;

        pci_read_config_dword(intel_private.bridge_dev,
                      I810_SMRAM_MISCC, &smram_miscc);

        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                == I810_GFX_MEM_WIN_32M)
            aperture_size = MB(32);
        else
            aperture_size = MB(64);
    } else if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);

        if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
            aperture_size = MB(64);
        else
            aperture_size = MB(128);
    } else {
        /* 9xx supports large sizes, just look at the length */
        aperture_size = pci_resource_len(intel_private.pcidev, 2);
    }

    /* one GTT entry per page of aperture */
    return aperture_size >> PAGE_SHIFT;
}
467
 
468
/*
 * Release the scratch page. Currently a no-op: the FreePage call is
 * commented out, so the page is deliberately leaked — presumably because
 * teardown is never exercised on this platform (TODO confirm).
 */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
472
 
473
/*
 * Undo intel_gtt_init(): run the chipset cleanup hook, then unmap the
 * GTT and the MMIO register window. Scratch page teardown is skipped
 * (see intel_gtt_teardown_scratch_page).
 */
static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

    FreeKernelSpace(intel_private.gtt);
    FreeKernelSpace(intel_private.registers);

  //  intel_gtt_teardown_scratch_page();
}
482
 
483
static int intel_gtt_init(void)
484
{
485
    u32 gtt_map_size;
486
    int ret;
487
 
488
    ENTER();
489
 
490
    ret = intel_private.driver->setup();
491
    if (ret != 0)
492
    {
493
        LEAVE();
494
        return ret;
495
    };
496
 
497
 
498
    intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
499
    intel_private.base.gtt_total_entries = intel_gtt_total_entries();
500
 
501
    /* save the PGETBL reg for resume */
502
    intel_private.PGETBL_save =
503
        readl(intel_private.registers+I810_PGETBL_CTL)
504
            & ~I810_PGETBL_ENABLED;
505
    /* we only ever restore the register when enabling the PGTBL... */
506
    if (HAS_PGTBL_EN)
507
        intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
508
 
509
    dbgprintf("detected gtt size: %dK total, %dK mappable\n",
510
            intel_private.base.gtt_total_entries * 4,
511
            intel_private.base.gtt_mappable_entries * 4);
512
 
513
    gtt_map_size = intel_private.base.gtt_total_entries * 4;
514
 
515
    intel_private.gtt = (u32*)MapIoMem(intel_private.gtt_bus_addr,
516
                    gtt_map_size, PG_SW+PG_NOCACHE);
517
    if (!intel_private.gtt) {
518
        intel_private.driver->cleanup();
519
        FreeKernelSpace(intel_private.registers);
520
        return -ENOMEM;
521
    }
522
 
523
    asm volatile("wbinvd");
524
 
525
    intel_private.base.stolen_size = intel_gtt_stolen_size();
526
 
527
    intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
528
 
529
    ret = intel_gtt_setup_scratch_page();
530
    if (ret != 0) {
531
        intel_gtt_cleanup();
532
        return ret;
533
    }
534
 
535
    intel_enable_gtt();
536
 
537
    LEAVE();
538
 
539
    return 0;
540
}
541
 
542
/*
 * Enable GTT address translation: read the aperture base into
 * gma_bus_addr, set the GMCH enable bit on gen2, then restore the saved
 * PGETBL_CTL value (with the enable bit, where the chipset has one).
 * Returns true on success, false if the enable bit did not stick.
 */
static bool intel_enable_gtt(void)
{
    u32 gma_addr;
    u8 __iomem *reg;

    /* aperture base lives in a different config register pre-/post-gen2 */
    if (INTEL_GTT_GEN <= 2)
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
                      &gma_addr);
    else
        pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                      &gma_addr);

    intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

    /* gen6+ needs no PGETBL programming */
    if (INTEL_GTT_GEN >= 6)
        return true;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                      I830_GMCH_CTRL, gmch_ctrl);

        /* read back to verify the enable bit stuck */
        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
            dbgprintf("failed to enable the GTT: GMCH_CTRL=%x\n",
                gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    reg = intel_private.registers+I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
        dbgprintf("failed to enable the GTT: PGETBL=%x [expected %x]\n",
            readl(reg), intel_private.PGETBL_save);
        return false;
    }

    /* flush again after the PGETBL update */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    return true;
}
596
 
597
 
598
 
599
/*
 * Set up the chipset flush page used by i9xx_chipset_flush().
 * In this port the actual resource allocation and ioremap are disabled
 * (#if 0), so on everything except gen6 this is currently a no-op and
 * i9xx_flush_page stays NULL.
 */
static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    /* gen6 does not need a flush page */
    if (INTEL_GTT_GEN == 6)
        return;

#if 0
    /* setup a resource for this object */
    intel_private.ifp_resource.name = "Intel Flush Page";
    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    /* Setup chipset flush for 915 */
    if (IS_G33 || INTEL_GTT_GEN >= 4) {
        intel_i965_g33_setup_chipset_flush();
    } else {
        intel_i915_setup_chipset_flush();
    }

    if (intel_private.ifp_resource.start)
        intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
            "can't ioremap flush page - no chipset flushing\n");
#endif

}
628
 
629
static void i9xx_chipset_flush(void)
630
{
631
    if (intel_private.i9xx_flush_page)
632
        writel(1, intel_private.i9xx_flush_page);
633
}
634
 
635
/*
 * ->check_flags() hook for gen6: every agp memory type has a valid PTE
 * encoding on this generation, so all flag values are accepted.
 */
static bool gen6_check_flags(unsigned int flags)
{
    (void)flags;    /* all types supported */
    return true;
}
639
 
640
/*
 * Write a single gen6 GTT PTE for the page at @addr into slot @entry.
 * @flags is an agp memory type selecting the cacheability bits; the
 * AGP_USER_CACHED_MEMORY_GFDT bit additionally sets GFDT in the PTE.
 */
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
                 unsigned int flags)
{
    unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
    unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
    u32 pte_flags;

    if (type_mask == AGP_USER_MEMORY)
        pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
    else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
        pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
        if (gfdt)
            pte_flags |= GEN6_PTE_GFDT;
    } else { /* set 'normal'/'cached' to LLC by default */
        pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
        if (gfdt)
            pte_flags |= GEN6_PTE_GFDT;
    }

    /* gen6 has bit11-4 for physical addr bit39-32 */
    addr |= (addr >> 28) & 0xff0;
    writel(addr | pte_flags, intel_private.gtt + entry);
}
663
 
664
/* Gen6 ->cleanup() hook: i9xx_setup() allocates nothing gen6-specific
 * beyond the mappings freed by the generic code, so nothing to undo. */
static void gen6_cleanup(void)
{
}
667
 
668
static int i9xx_setup(void)
669
{
670
    u32 reg_addr;
671
 
672
    pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr);
673
 
674
    reg_addr &= 0xfff80000;
675
 
676
    intel_private.registers = (u8*)MapIoMem(reg_addr, 128 * 4096, PG_SW+PG_NOCACHE);
677
 
678
    if (!intel_private.registers)
679
        return -ENOMEM;
680
 
681
    if (INTEL_GTT_GEN == 3) {
682
        u32 gtt_addr;
683
 
684
        pci_read_config_dword(intel_private.pcidev,
685
                      I915_PTEADDR, >t_addr);
686
        intel_private.gtt_bus_addr = gtt_addr;
687
    } else {
688
        u32 gtt_offset;
689
 
690
        switch (INTEL_GTT_GEN) {
691
        case 5:
692
        case 6:
693
            gtt_offset = MB(2);
694
            break;
695
        case 4:
696
        default:
697
            gtt_offset =  KB(512);
698
            break;
699
        }
700
        intel_private.gtt_bus_addr = reg_addr + gtt_offset;
701
    }
702
 
703
    intel_i9xx_setup_flush();
704
 
705
    return 0;
706
}
707
 
708
/* Driver vtable for Sandybridge (gen6) — the only chipset family this
 * port currently supports. */
static const struct intel_gtt_driver sandybridge_gtt_driver = {
    .gen = 6,
    .setup = i9xx_setup,
    .cleanup = gen6_cleanup,
    .write_entry = gen6_write_entry,
    .dma_mask_size = 40,        /* gen6 PTEs address 40 physical bits */
    .check_flags = gen6_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
717
 
718
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 * Terminated by an all-zero sentinel entry.
 */
static const struct intel_gtt_driver_description {
    unsigned int gmch_chip_id;  /* PCI device id of the IGD function */
    char *name;                 /* human-readable chipset name */
    const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { 0, NULL, NULL }
};
743
 
744
/*
 * Locate the Intel graphics device with PCI id @device and stash it in
 * intel_private.pcidev. If the first match is not PCI function 0, the
 * search continues from that match. Returns 1 when found, 0 otherwise.
 */
static int find_gmch(u16 device)
{
    struct pci_dev *dev;

    dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
    if (dev != NULL && PCI_FUNC(dev->devfn) != 0)
        dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev);

    if (dev == NULL)
        return 0;

    intel_private.pcidev = dev;
    return 1;
}
760
 
761
/*
 * Probe entry point: match the installed graphics device against
 * intel_gtt_chipsets, wire up @bridge, and run intel_gtt_init().
 * @pdev is the host bridge device; the graphics device itself is found
 * by find_gmch(). Returns 1 on success, 0 when no supported chipset is
 * present or initialisation fails.
 */
int intel_gmch_probe(struct pci_dev *pdev,
                      struct agp_bridge_data *bridge)
{
    int i, mask;
    intel_private.driver = NULL;

    /* scan the chipset table until the sentinel entry */
    for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
        if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
            intel_private.driver =
                intel_gtt_chipsets[i].gtt_driver;
            break;
        }
    }

    if (!intel_private.driver)
        return 0;

 //   bridge->driver = &intel_fake_agp_driver;
    bridge->dev_private_data = &intel_private;
    bridge->dev = pdev;

    intel_private.bridge_dev = pdev;

    dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);

    /* DMA mask setup is disabled in this port; mask is kept for reference */
    mask = intel_private.driver->dma_mask_size;
//    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//        dev_err(&intel_private.pcidev->dev,
//            "set gfx device dma mask %d-bit failed!\n", mask);
//    else
//        pci_set_consistent_dma_mask(intel_private.pcidev,
//                        DMA_BIT_MASK(mask));

    /*if (bridge->driver == &intel_810_driver)
        return 1;*/

    if (intel_gtt_init() != 0)
        return 0;

    return 1;
}
802