Subversion Repositories Kolibri OS

Rev

Rev 2339 | Rev 3031 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2325 Serge 1
/*
2
 * Intel GTT (Graphics Translation Table) routines
3
 *
4
 * Caveat: This driver implements the linux agp interface, but this is far from
5
 * an agp driver! GTT support ended up here for purely historical reasons: The
6
 * old userspace intel graphics drivers needed an interface to map memory into
7
 * the GTT. And the drm provides a default interface for graphic devices sitting
8
 * on an agp port. So it made sense to fake the GTT support as an agp port to
9
 * avoid having to create a new api.
10
 *
11
 * With gem this does not make much sense anymore, just needlessly complicates
12
 * the code. But as long as the old graphics stack is still supported, it's stuck
13
 * here.
14
 *
15
 * /fairy-tale-mode off
16
 */
17
 
18
/* NOTE(review): the #include targets were lost during extraction; names
 * below reconstructed from the symbols this file uses — confirm against
 * the repository. */
#include <linux/module.h>
#include <errno.h>
#include <linux/pci.h>
#include <linux/kernel.h>
//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
#include <linux/spinlock.h>
#include "agp.h"
#include "intel-agp.h"
#include "intel-gtt.h"

#include <syscall.h>
31
 
32
/* PCI enumeration helper provided by the KolibriOS kernel (defined elsewhere). */
struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);

/* Forward declaration; defined later in this file. */
static bool intel_enable_gtt(void);


/* PCI ids consulted by find_gmch() and intel_gtt_stolen_size(). */
#define PCI_VENDOR_ID_INTEL             0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2


#define AGP_NORMAL_MEMORY 0

/* User memory types start above 1<<16, mirroring the linux agp interface. */
#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
52
 
53
 
54
/*
 * Minimal stand-in for the linux PCI config accessor: reads a 16-bit value
 * from the device's config space via the KolibriOS PciRead16 service.
 * Always returns 1 (callers in this file ignore the return value).
 */
static inline int pci_read_config_word(struct pci_dev *dev, int where,
                    u16 *val)
{
    *val = PciRead16(dev->busnr, dev->devfn, where);
    return 1;
}
60
 
61
/*
 * 32-bit config-space read; see pci_read_config_word() above.
 * Always returns 1 (return value is ignored by callers here).
 */
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
                    u32 *val)
{
    *val = PciRead32(dev->busnr, dev->devfn, where);
    return 1;
}
67
 
68
/*
 * 16-bit config-space write via the KolibriOS PciWrite16 service.
 * Always returns 1 (return value is ignored by callers here).
 */
static inline int pci_write_config_word(struct pci_dev *dev, int where,
                    u16 val)
{
    PciWrite16(dev->busnr, dev->devfn, where, val);
    return 1;
}
74
 
75
/*
76
 * If we have Intel graphics, we're not going to have anything other than
77
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
2339 Serge 78
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
2325 Serge 79
 * Only newer chipsets need to bother with this, of course.
80
 */
2339 Serge 81
#ifdef CONFIG_INTEL_IOMMU
2325 Serge 82
#define USE_PCI_DMA_API 1
83
#else
84
#define USE_PCI_DMA_API 0
85
#endif
86
 
87
/* Per-chipset-family driver: feature flags plus the operations the common
 * code dispatches through (see intel_gtt_chipsets[] for the instances). */
struct intel_gtt_driver {
    unsigned int gen : 8;               /* gpu generation, 3..6 in this file */
    unsigned int is_g33 : 1;
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1;  /* chipset honours I810_PGETBL_ENABLED */
    unsigned int dma_mask_size : 8;     /* dma address width in bits */
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    void (*chipset_flush)(void);
};
106
 
107
/* Singleton driver state; populated by intel_gmch_probe()/intel_gtt_init(). */
static struct _intel_private {
    struct intel_gtt base;              /* public view, returned by intel_gtt_get() */
    const struct intel_gtt_driver *driver;
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;
    u8 __iomem *registers;              /* mapped mmio register file */
    phys_addr_t gtt_bus_addr;           /* physical address of the GTT itself */
    phys_addr_t gma_bus_addr;           /* graphics aperture base (set in intel_enable_gtt) */
    u32 PGETBL_save;                    /* PGETBL_CTL value to restore on enable/resume */
    u32 __iomem *gtt;       /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;      /* mmio page used by i9xx_chipset_flush() */
    char *i81x_gtt_table;
    struct resource ifp_resource;
    int resource_valid;
    struct page *scratch_page;
    dma_addr_t scratch_page_dma;        /* physical addr of the scratch page */
} intel_private;
126
 
127
/* Shorthand accessors for the active chipset driver's properties. */
#define INTEL_GTT_GEN   intel_private.driver->gen
#define IS_G33          intel_private.driver->is_g33
#define IS_PINEVIEW     intel_private.driver->is_pineview
#define IS_IRONLAKE     intel_private.driver->is_ironlake
#define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable
132
 
133
static int intel_gtt_setup_scratch_page(void)
134
{
135
    addr_t page;
136
 
137
    page = AllocPage();
138
    if (page == 0)
139
        return -ENOMEM;
140
 
141
    intel_private.scratch_page_dma = page;
142
    intel_private.scratch_page = NULL;
143
 
144
    return 0;
145
}
146
 
147
/*
 * Decode how much main memory the BIOS set aside ("stolen") for graphics.
 * The encoding lives in the bridge's GMCH control word, except on
 * SandyBridge (gen 6) where a new control register on the gpu device is
 * used.  Returns the size in bytes, 0 if nothing is pre-allocated.
 */
static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;      /* set when memory is local (82830/82845G RDRAM) */
    static const int ddt[4] = { 0, 16, 32, 64 };
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctrl);

    /* 82830/82845G use their own GMS encoding, including local RDRAM. */
    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            /* local RDRAM: size = (channels) * (device density) */
            rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else if (INTEL_GTT_GEN == 6) {
        /*
         * SandyBridge has new memory control reg at 0x50.w
         */
        u16 snb_gmch_ctl;
        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
        case SNB_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case SNB_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case SNB_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case SNB_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case SNB_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case SNB_GMCH_GMS_STOLEN_192M:
            stolen_size = MB(192);
            break;
        case SNB_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case SNB_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case SNB_GMCH_GMS_STOLEN_288M:
            stolen_size = MB(288);
            break;
        case SNB_GMCH_GMS_STOLEN_320M:
            stolen_size = MB(320);
            break;
        case SNB_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        case SNB_GMCH_GMS_STOLEN_384M:
            stolen_size = MB(384);
            break;
        case SNB_GMCH_GMS_STOLEN_416M:
            stolen_size = MB(416);
            break;
        case SNB_GMCH_GMS_STOLEN_448M:
            stolen_size = MB(448);
            break;
        case SNB_GMCH_GMS_STOLEN_480M:
            stolen_size = MB(480);
            break;
        case SNB_GMCH_GMS_STOLEN_512M:
            stolen_size = MB(512);
            break;
        }
    } else {
        /* everything else (855 through ironlake) shares this encoding */
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
               stolen_size / KB(1), local ? "local" : "stolen");
    } else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}
297
 
298
/*
 * Reprogram the ggtt size field of PGETBL_CTL to size_flag (one of the
 * I965_PGETBL_SIZE_* values).  ppgtt must be off while doing so, hence the
 * CTL2 dance first.  Used on gen 5 where the BIOS-set size can be changed.
 */
static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
    u32 pgetbl_ctl, pgetbl_ctl2;

    /* ensure that ppgtt is disabled */
    pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
    pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
    writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

    /* write the new ggtt size */
    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
    pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
    pgetbl_ctl |= size_flag;
    writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
313
 
314
/*
 * Number of GTT entries on i965-class hardware, derived from the page
 * table size encoded in PGETBL_CTL.  On gen 5 the GMCH control word may
 * first force a larger page table via i965_adjust_pgetbl_size().
 * Returns entry count (table size in bytes / 4 bytes per PTE).
 */
static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    return size/4;  /* each PTE is 4 bytes */
}
369
 
370
/*
 * Total number of GTT entries for the active chipset.  Dispatches to the
 * i965 decoder for g33/gen4/gen5, reads the SNB control register on gen 6,
 * and on older parts the table exactly covers the aperture.
 */
static unsigned int intel_gtt_total_entries(void)
{
    int size;

    if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
        return i965_gtt_total_entries();
    else if (INTEL_GTT_GEN == 6) {
        u16 snb_gmch_ctl;

        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
        default:
        case SNB_GTT_SIZE_0M:
            printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
            size = MB(0);
            break;
        case SNB_GTT_SIZE_1M:
            size = MB(1);
            break;
        case SNB_GTT_SIZE_2M:
            size = MB(2);
            break;
        }
        return size/4;  /* 4 bytes per PTE */
    } else {
        /* On previous hardware, the GTT size was just what was
         * required to map the aperture.
         */
        return intel_private.base.gtt_mappable_entries;
    }
}
401
 
402
/*
 * Number of GTT entries needed to cover the cpu-visible aperture.
 * gen 1/2 encode the aperture size in config registers; gen 3+ simply
 * report the length of PCI BAR 2.
 */
static unsigned int intel_gtt_mappable_entries(void)
{
    unsigned int aperture_size;

    if (INTEL_GTT_GEN == 1) {
        u32 smram_miscc;

        pci_read_config_dword(intel_private.bridge_dev,
                      I810_SMRAM_MISCC, &smram_miscc);

        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                == I810_GFX_MEM_WIN_32M)
            aperture_size = MB(32);
        else
            aperture_size = MB(64);
    } else if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);

        if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
            aperture_size = MB(64);
        else
            aperture_size = MB(128);
    } else {
        /* 9xx supports large sizes, just look at the length */
        aperture_size = pci_resource_len(intel_private.pcidev, 2);
    }

    return aperture_size >> PAGE_SHIFT;  /* one PTE per page */
}
434
 
435
/* Counterpart of intel_gtt_setup_scratch_page().  The page is deliberately
 * not freed (call commented out) — NOTE(review): presumably because GTT
 * entries may still reference it at teardown; confirm before re-enabling. */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
439
 
440
/* Undo intel_gtt_init(): chipset-specific cleanup, then unmap the GTT and
 * register window and release the scratch page. */
static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

    FreeKernelSpace(intel_private.gtt);
    FreeKernelSpace(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}
449
 
450
/*
 * Bring up the GTT: run the chipset-specific ->setup(), size the table,
 * map it, detect stolen memory, allocate the scratch page and finally
 * enable translation.  Returns 0 on success or a negative errno; on
 * failure everything set up so far is torn down again.
 */
static int intel_gtt_init(void)
{
    u32 gtt_map_size;
    int ret;

    ENTER();

    ret = intel_private.driver->setup();
    if (ret != 0)
    {
        LEAVE();
        return ret;
    };


    intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
    intel_private.base.gtt_total_entries = intel_gtt_total_entries();

    /* save the PGETBL reg for resume */
    intel_private.PGETBL_save =
        readl(intel_private.registers+I810_PGETBL_CTL)
            & ~I810_PGETBL_ENABLED;
    /* we only ever restore the register when enabling the PGTBL... */
    if (HAS_PGTBL_EN)
        intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
			"detected gtt size: %dK total, %dK mappable\n",
            intel_private.base.gtt_total_entries * 4,
            intel_private.base.gtt_mappable_entries * 4);

    /* 4 bytes per PTE */
    gtt_map_size = intel_private.base.gtt_total_entries * 4;

    intel_private.gtt = (u32*)MapIoMem(intel_private.gtt_bus_addr,
                    gtt_map_size, PG_SW+PG_NOCACHE);
    if (!intel_private.gtt) {
        intel_private.driver->cleanup();
        FreeKernelSpace(intel_private.registers);
        return -ENOMEM;
    }

    /* flush cpu caches before the (uncached) GTT mapping is used */
    asm volatile("wbinvd");

    intel_private.base.stolen_size = intel_gtt_stolen_size();

    intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

    ret = intel_gtt_setup_scratch_page();
    if (ret != 0) {
        intel_gtt_cleanup();
        return ret;
    }

    intel_enable_gtt();

    LEAVE();

    return 0;
}
509
 
2339 Serge 510
/*
 * Write one i830-format PTE: physical address plus a valid bit, with the
 * system-cached bit set for AGP_USER_CACHED_MEMORY mappings.
 */
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte = I810_PTE_VALID;

	pte |= (flags == AGP_USER_CACHED_MEMORY) ? I830_PTE_SYSTEM_CACHED : 0;

	writel(addr | pte, intel_private.gtt + entry);
}
520
 
2325 Serge 521
/*
 * Enable GTT translation: record the aperture base, set the GMCH enable
 * bit on gen 2, then restore the saved PGETBL_CTL value.  Returns false if
 * the hardware refuses either enable bit, true otherwise (gen 6+ needs no
 * enabling beyond reading the aperture address).
 */
static bool intel_enable_gtt(void)
{
    u32 gma_addr;
    u8 __iomem *reg;

    /* the aperture BAR moved between gen 2 and gen 3 */
    if (INTEL_GTT_GEN <= 2)
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
                      &gma_addr);
    else
        pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                      &gma_addr);

    intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

    if (INTEL_GTT_GEN >= 6)
        return true;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                      I830_GMCH_CTRL, gmch_ctrl);

        /* read back to verify the bit actually stuck */
        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
                gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    reg = intel_private.registers+I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
            readl(reg), intel_private.PGETBL_save);
        return false;
    }

    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    return true;
}
577
 
2339 Serge 578
static bool i830_check_flags(unsigned int flags)
579
{
580
	switch (flags) {
581
	case 0:
582
	case AGP_PHYS_MEMORY:
583
	case AGP_USER_CACHED_MEMORY:
584
	case AGP_USER_MEMORY:
585
		return true;
586
	}
2325 Serge 587
 
2339 Serge 588
	return false;
589
}
590
 
2332 Serge 591
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
592
                struct page **pages, unsigned int flags)
593
{
594
    int i, j;
2325 Serge 595
 
2332 Serge 596
    for (i = 0, j = first_entry; i < num_entries; i++, j++) {
597
        dma_addr_t addr = (dma_addr_t)(pages[i]);
598
        intel_private.driver->write_entry(addr,
599
                          j, flags);
600
    }
601
    readl(intel_private.gtt+j-1);
602
}
2344 Serge 603
EXPORT_SYMBOL(intel_gtt_insert_pages);
2332 Serge 604
 
605
 
606
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
607
{
608
	unsigned int i;
609
 
610
	for (i = first_entry; i < (first_entry + num_entries); i++) {
611
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
612
						  i, 0);
613
	}
614
	readl(intel_private.gtt+i-1);
615
}
616
 
2325 Serge 617
/*
 * Set up the chipset flush page used by i9xx_chipset_flush().  In this
 * port the resource discovery and ioremap are stubbed out (commented), so
 * i9xx_flush_page stays NULL and the dev_err below always fires on the
 * chipsets that reach it — chipset flushing is effectively disabled.
 */
static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    if (INTEL_GTT_GEN == 6)
        return;

    /* setup a resource for this object */
//    intel_private.ifp_resource.name = "Intel Flush Page";
//    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    intel_private.resource_valid = 0;

    /* Setup chipset flush for 915 */
//    if (IS_G33 || INTEL_GTT_GEN >= 4) {
//        intel_i965_g33_setup_chipset_flush();
//    } else {
//        intel_i915_setup_chipset_flush();
//    }

//    if (intel_private.ifp_resource.start)
//        intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
            "can't ioremap flush page - no chipset flushing\n");
}
2325 Serge 645
 
2339 Serge 646
/* Undo intel_i9xx_setup_flush(): unmap the flush page and reset the
 * (currently unused) resource bookkeeping. */
static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
//	if (intel_private.resource_valid)
//		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}
655
 
656
/* Flush chipset write buffers by writing to the dedicated flush page
 * (no-op while the flush page is not mapped — see intel_i9xx_setup_flush). */
static void i9xx_chipset_flush(void)
{
    if (intel_private.i9xx_flush_page)
        writel(1, intel_private.i9xx_flush_page);
}
661
 
2339 Serge 662
/*
 * Write one i965-format PTE.  i965 stores physical address bits 35:32 in
 * PTE bits 7:4, hence the shift-and-mask before the write.
 */
static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte |= I830_PTE_SYSTEM_CACHED;

	/* fold address bits 35:32 down into PTE bits 7:4 */
	addr |= (addr >> 28) & 0xf0;

	writel(addr | pte, intel_private.gtt + entry);
}
676
 
2325 Serge 677
/* gen6 PTEs can encode every agp memory type, so any flags value is valid. */
static bool gen6_check_flags(unsigned int flags)
{
    (void)flags;
    return true;
}
681
 
682
/*
 * Write one SandyBridge-format PTE.  The flags value selects the cache
 * level (uncached / LLC+MLC / LLC) and may carry the GFDT bit.
 */
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
                 unsigned int flags)
{
    unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
    unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
    u32 pte_flags;

    if (type_mask == AGP_USER_MEMORY)
        pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
    else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
        pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
        if (gfdt)
            pte_flags |= GEN6_PTE_GFDT;
    } else { /* set 'normal'/'cached' to LLC by default */
        pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
        if (gfdt)
            pte_flags |= GEN6_PTE_GFDT;
    }

    /* gen6 has bit11-4 for physical addr bit39-32 */
    addr |= (addr >> 28) & 0xff0;
    writel(addr | pte_flags, intel_private.gtt + entry);
}
705
 
706
/* gen6 ->setup() (i9xx_setup) acquires nothing chipset-specific to undo;
 * the mmio unmap is handled by the generic cleanup code. */
static void gen6_cleanup(void)
{
}
709
 
2339 Serge 710
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;
	extern int intel_iommu_gfx_mapped;

	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
	     intel_iommu_gfx_mapped)
		return 1;
#endif
	return 0;
}
729
 
2325 Serge 730
static int i9xx_setup(void)
731
{
732
    u32 reg_addr;
733
 
734
    pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr);
735
 
736
    reg_addr &= 0xfff80000;
737
 
738
    intel_private.registers = (u8*)MapIoMem(reg_addr, 128 * 4096, PG_SW+PG_NOCACHE);
739
 
740
    if (!intel_private.registers)
741
        return -ENOMEM;
742
 
743
    if (INTEL_GTT_GEN == 3) {
744
        u32 gtt_addr;
745
 
746
        pci_read_config_dword(intel_private.pcidev,
747
                      I915_PTEADDR, >t_addr);
748
        intel_private.gtt_bus_addr = gtt_addr;
749
    } else {
750
        u32 gtt_offset;
751
 
752
        switch (INTEL_GTT_GEN) {
753
        case 5:
754
        case 6:
755
            gtt_offset = MB(2);
756
            break;
757
        case 4:
758
        default:
759
            gtt_offset =  KB(512);
760
            break;
761
        }
762
        intel_private.gtt_bus_addr = reg_addr + gtt_offset;
763
    }
764
 
2339 Serge 765
	if (needs_idle_maps())
766
		intel_private.base.do_idle_maps = 1;
767
 
2325 Serge 768
    intel_i9xx_setup_flush();
769
 
770
    return 0;
771
}
772
 
2339 Serge 773
/* Chipset driver instances; selected by intel_gtt_chipsets[] below. */
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
    .gen = 6,
    .setup = i9xx_setup,
    .cleanup = gen6_cleanup,
    .write_entry = gen6_write_entry,
    .dma_mask_size = 40,
    .check_flags = gen6_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
842
 
843
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 * Terminated by an all-zero sentinel entry.
 */
static const struct intel_gtt_driver_description {
    unsigned int gmch_chip_id;          /* PCI device id of the gpu function */
    char *name;                         /* human-readable chipset name */
    const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
        "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
    { 0, NULL, NULL }
};
930
 
931
/*
 * Look for an Intel graphics function with the given PCI device id,
 * preferring function 0.  On success intel_private.pcidev is set and 1 is
 * returned; 0 means no such device.
 */
static int find_gmch(u16 device)
{
    struct pci_dev *candidate;

    candidate = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);

    /* first match was not function 0 — continue scanning from it */
    if (candidate && PCI_FUNC(candidate->devfn) != 0)
        candidate = pci_get_device(PCI_VENDOR_ID_INTEL,
                         device, candidate);

    if (candidate == NULL)
        return 0;

    intel_private.pcidev = candidate;
    return 1;
}
947
 
948
/*
 * Probe entry point: match the installed gpu against intel_gtt_chipsets[],
 * wire up the bridge data and run intel_gtt_init().
 * Returns 1 on success, 0 when no supported chipset is found or init fails.
 * pdev here is the host bridge; the gpu function is found by find_gmch().
 */
int intel_gmch_probe(struct pci_dev *pdev,
                      struct agp_bridge_data *bridge)
{
    int i, mask;
    intel_private.driver = NULL;

    /* scan the id table; the first present chipset wins */
    for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
        if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
            intel_private.driver =
                intel_gtt_chipsets[i].gtt_driver;
            break;
        }
    }

    if (!intel_private.driver)
        return 0;

 //   bridge->driver = &intel_fake_agp_driver;
    bridge->dev_private_data = &intel_private;
    bridge->dev = pdev;

    intel_private.bridge_dev = pdev;

    dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);

    /* dma-mask setup is stubbed out in this port */
    mask = intel_private.driver->dma_mask_size;
//    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//        dev_err(&intel_private.pcidev->dev,
//            "set gfx device dma mask %d-bit failed!\n", mask);
//    else
//        pci_set_consistent_dma_mask(intel_private.pcidev,
//                        DMA_BIT_MASK(mask));

    /*if (bridge->driver == &intel_810_driver)
        return 1;*/

    if (intel_gtt_init() != 0)
        return 0;

    return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);
2325 Serge 990
 
2326 Serge 991
/* Expose the probed GTT description (entry counts, stolen size) to users
 * such as the drm driver. */
const struct intel_gtt *intel_gtt_get(void)
{
    return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);
2326 Serge 996
 
2332 Serge 997
void intel_gtt_chipset_flush(void)
998
{
999
	if (intel_private.driver->chipset_flush)
1000
		intel_private.driver->chipset_flush();
1001
}
2339 Serge 1002
EXPORT_SYMBOL(intel_gtt_chipset_flush);
2327 Serge 1003
 
2332 Serge 1004
 
2327 Serge 1005
/* Bus address of the graphics aperture, valid after intel_enable_gtt(). */
phys_addr_t get_bus_addr(void)
{
    return intel_private.gma_bus_addr;
};