Subversion Repositories Kolibri OS

Rev

Rev 3031 | Rev 3243 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Intel GTT (Graphics Translation Table) routines
  3.  *
  4.  * Caveat: This driver implements the linux agp interface, but this is far from
  5.  * an agp driver! GTT support ended up here for purely historical reasons: The
  6.  * old userspace intel graphics drivers needed an interface to map memory into
  7.  * the GTT. And the drm provides a default interface for graphic devices sitting
  8.  * on an agp port. So it made sense to fake the GTT support as an agp port to
  9.  * avoid having to create a new api.
  10.  *
  11.  * With gem this does not make much sense anymore, just needlessly complicates
  12.  * the code. But as long as the old graphics stack is still supported, it's stuck
  13.  * here.
  14.  *
  15.  * /fairy-tale-mode off
  16.  */
  17.  
  18. #include <linux/module.h>
  19. #include <errno-base.h>
  20. #include <linux/pci.h>
  21. #include <linux/kernel.h>
  22. #include <linux/export.h>
  23. //#include <linux/pagemap.h>
  24. //#include <linux/agp_backend.h>
  25. //#include <asm/smp.h>
  26. #include <linux/spinlock.h>
  27. #include "agp.h"
  28. #include "intel-agp.h"
  29. #include "intel-gtt.h"
  30.  
  31. #include <syscall.h>
  32.  
  33. struct pci_dev *
  34. pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
  35.  
  36.  
  37. #define PCI_VENDOR_ID_INTEL             0x8086
  38. #define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
  39. #define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
  40. #define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
  41. #define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
  42. #define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
  43. #define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2
  44.  
  45.  
  46. #define AGP_NORMAL_MEMORY 0
  47.  
  48. #define AGP_USER_TYPES (1 << 16)
  49. #define AGP_USER_MEMORY (AGP_USER_TYPES)
  50. #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
  51.  
  52.  
  53.  
  54. /*
  55.  * If we have Intel graphics, we're not going to have anything other than
  56.  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
  57.  * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
  58.  * Only newer chipsets need to bother with this, of course.
  59.  */
  60. #ifdef CONFIG_INTEL_IOMMU
  61. #define USE_PCI_DMA_API 1
  62. #else
  63. #define USE_PCI_DMA_API 0
  64. #endif
  65.  
/*
 * Per-chipset operations table.  Each supported GMCH generation fills in
 * one of these; the generic code dispatches through intel_private.driver.
 */
struct intel_gtt_driver {
    unsigned int gen : 8;               /* chipset generation number (2..7) */
    unsigned int is_g33 : 1;            /* G33 family: different GTT sizing path */
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1;  /* PGETBL_CTL carries an enable bit */
    unsigned int dma_mask_size : 8;     /* supported DMA address width, in bits */
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    /* Write a single PTE; flags selects the caching mode. */
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    /* Flush chipset write buffers (may be NULL on some chipsets). */
    void (*chipset_flush)(void);
};
  85.  
/* Singleton driver state; this file only ever manages one GMCH. */
static struct _intel_private {
    struct intel_gtt base;              /* public part, exposed via intel-gtt.h */
    const struct intel_gtt_driver *driver; /* chipset ops table */
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;         /* host bridge; holds GMCH control regs */
    u8 __iomem *registers;              /* mapped MMIO register file */
    phys_addr_t gtt_bus_addr;           /* bus address of the GTT itself */
    u32 PGETBL_save;                    /* PGETBL_CTL value to restore on enable */
    u32 __iomem *gtt;       /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;      /* mapped chipset flush page, or NULL */
    char *i81x_gtt_table;
    struct resource ifp_resource;       /* flush-page resource (unused in this port) */
    int resource_valid;
    struct page *scratch_page;          /* always NULL in this port, see setup */
        int refcount;
} intel_private;
  104.  
  105. #define INTEL_GTT_GEN   intel_private.driver->gen
  106. #define IS_G33          intel_private.driver->is_g33
  107. #define IS_PINEVIEW     intel_private.driver->is_pineview
  108. #define IS_IRONLAKE     intel_private.driver->is_ironlake
  109. #define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable
  110.  
/*
 * Allocate the scratch page that unused GTT entries are pointed at.
 * This port uses the KolibriOS page allocator (AllocPage) instead of the
 * Linux page/DMA API, so only the bus address is recorded.
 * Returns 0 on success, -ENOMEM if no page could be allocated.
 */
static int intel_gtt_setup_scratch_page(void)
{
        dma_addr_t dma_addr;

    dma_addr = AllocPage();
    if (dma_addr == 0)
        return -ENOMEM;

    intel_private.base.scratch_page_dma = dma_addr;
    /* No struct page backing in this port. */
    intel_private.scratch_page = NULL;

    return 0;
}
  124.  
/*
 * Determine how much main memory the BIOS reserved ("stole") for the
 * graphics device, by decoding the GMCH control register of the host
 * bridge (or, on SandyBridge, the new control word at 0x50 on the GPU
 * device itself).  Returns the size in bytes, 0 if none was reserved.
 */
static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;      /* set when the memory is local (RDRAM) not stolen */
    static const int ddt[4] = { 0, 16, 32, 64 };    /* RDRAM device density, MB */
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctrl);

    /* i830/i845 use a different GMS field encoding than later chipsets. */
    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            /* Local RDRAM: size = (channels) * (device density). */
            rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else if (INTEL_GTT_GEN == 6) {
        /*
         * SandyBridge has new memory control reg at 0x50.w
         */
        u16 snb_gmch_ctl;
        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
        case SNB_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case SNB_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case SNB_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case SNB_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case SNB_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case SNB_GMCH_GMS_STOLEN_192M:
            stolen_size = MB(192);
            break;
        case SNB_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case SNB_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case SNB_GMCH_GMS_STOLEN_288M:
            stolen_size = MB(288);
            break;
        case SNB_GMCH_GMS_STOLEN_320M:
            stolen_size = MB(320);
            break;
        case SNB_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        case SNB_GMCH_GMS_STOLEN_384M:
            stolen_size = MB(384);
            break;
        case SNB_GMCH_GMS_STOLEN_416M:
            stolen_size = MB(416);
            break;
        case SNB_GMCH_GMS_STOLEN_448M:
            stolen_size = MB(448);
            break;
        case SNB_GMCH_GMS_STOLEN_480M:
            stolen_size = MB(480);
            break;
        case SNB_GMCH_GMS_STOLEN_512M:
            stolen_size = MB(512);
            break;
        }
    } else {
        /* i855 through gen5: common GMS encoding. */
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
                dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
               stolen_size / KB(1), local ? "local" : "stolen");
    } else {
                dev_info(&intel_private.bridge_dev->dev,
                       "no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}
  275.  
/*
 * Reprogram the global GTT size field in PGETBL_CTL (gen4/gen5 only).
 * The per-process GTT is disabled first via PGETBL_CTL2 so that resizing
 * the global table is safe.  size_flag is one of the I965_PGETBL_SIZE_*
 * values.
 */
static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
    u32 pgetbl_ctl, pgetbl_ctl2;

    /* ensure that ppgtt is disabled */
    pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
    pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
    writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

    /* write the new ggtt size */
    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
    pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
    pgetbl_ctl |= size_flag;
    writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
  291.  
/*
 * Return the number of entries in the global GTT on gen4/gen5 and G33.
 * On gen5 the page table may first be resized according to the GMCH
 * control word; the final size is then read back from PGETBL_CTL and
 * converted from bytes to entries (4 bytes per PTE).
 */
static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        /* Program the GTT size the BIOS selected in the GMCH word. */
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
                dev_info(&intel_private.pcidev->dev,
                         "unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    /* 4 bytes per PTE. */
    return size/4;
}
  347.  
/*
 * Return the total number of GTT entries for the detected chipset,
 * dispatching to the generation-specific sizing method.
 */
static unsigned int intel_gtt_total_entries(void)
{
    int size;

    if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
        return i965_gtt_total_entries();
    else if (INTEL_GTT_GEN == 6) {
        /* SandyBridge encodes the GTT size in the GMCH control word. */
        u16 snb_gmch_ctl;

        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
        default:
        case SNB_GTT_SIZE_0M:
            printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
            size = MB(0);
            break;
        case SNB_GTT_SIZE_1M:
            size = MB(1);
            break;
        case SNB_GTT_SIZE_2M:
            size = MB(2);
            break;
        }
        /* 4 bytes per PTE. */
        return size/4;
    } else {
        /* On previous hardware, the GTT size was just what was
         * required to map the aperture.
         */
        return intel_private.base.gtt_mappable_entries;
    }
}
  379.  
/*
 * Return how many GTT entries are CPU-mappable through the graphics
 * aperture, i.e. the aperture size in pages.  gen1/gen2 encode the
 * aperture size in config-space bits; gen3+ report it directly as the
 * length of PCI BAR 2.
 */
static unsigned int intel_gtt_mappable_entries(void)
{
    unsigned int aperture_size;

    if (INTEL_GTT_GEN == 1) {
        u32 smram_miscc;

        pci_read_config_dword(intel_private.bridge_dev,
                      I810_SMRAM_MISCC, &smram_miscc);

        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                == I810_GFX_MEM_WIN_32M)
            aperture_size = MB(32);
        else
            aperture_size = MB(64);
    } else if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);

        if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
            aperture_size = MB(64);
        else
            aperture_size = MB(128);
    } else {
        /* 9xx supports large sizes, just look at the length */
        aperture_size = pci_resource_len(intel_private.pcidev, 2);
    }

    return aperture_size >> PAGE_SHIFT;
}
  412.  
/*
 * Counterpart of intel_gtt_setup_scratch_page().  Freeing is currently
 * disabled in this port, so the scratch page is leaked on teardown.
 */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
  417.  
/*
 * Undo intel_gtt_init(): chipset-specific cleanup first, then unmap the
 * GTT and the register file, and finally release the scratch page.
 */
static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

        iounmap(intel_private.gtt);
        iounmap(intel_private.registers);

        intel_gtt_teardown_scratch_page();
}
  427.  
  428. static int intel_gtt_init(void)
  429. {
  430.         u32 gma_addr;
  431.     u32 gtt_map_size;
  432.     int ret;
  433.  
  434.     ret = intel_private.driver->setup();
  435.     if (ret != 0)
  436.     {
  437.         return ret;
  438.     };
  439.  
  440.  
  441.     intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
  442.     intel_private.base.gtt_total_entries = intel_gtt_total_entries();
  443.  
  444.     /* save the PGETBL reg for resume */
  445.     intel_private.PGETBL_save =
  446.         readl(intel_private.registers+I810_PGETBL_CTL)
  447.             & ~I810_PGETBL_ENABLED;
  448.     /* we only ever restore the register when enabling the PGTBL... */
  449.     if (HAS_PGTBL_EN)
  450.         intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
  451.  
  452.         dev_info(&intel_private.bridge_dev->dev,
  453.                         "detected gtt size: %dK total, %dK mappable\n",
  454.             intel_private.base.gtt_total_entries * 4,
  455.             intel_private.base.gtt_mappable_entries * 4);
  456.  
  457.     gtt_map_size = intel_private.base.gtt_total_entries * 4;
  458.  
  459.         intel_private.gtt = NULL;
  460. //   if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
  461. //       intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
  462. //                          gtt_map_size);
  463.         if (intel_private.gtt == NULL)
  464.                 intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
  465.                                             gtt_map_size);
  466.         if (intel_private.gtt == NULL) {
  467.         intel_private.driver->cleanup();
  468.                 iounmap(intel_private.registers);
  469.         return -ENOMEM;
  470.     }
  471.         intel_private.base.gtt = intel_private.gtt;
  472.  
  473.     asm volatile("wbinvd");
  474.  
  475.     intel_private.base.stolen_size = intel_gtt_stolen_size();
  476.  
  477.     intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
  478.  
  479.     ret = intel_gtt_setup_scratch_page();
  480.     if (ret != 0) {
  481.         intel_gtt_cleanup();
  482.         return ret;
  483.     }
  484.  
  485.         if (INTEL_GTT_GEN <= 2)
  486.                 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
  487.                                       &gma_addr);
  488.         else
  489.                 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
  490.                                       &gma_addr);
  491.  
  492.         intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
  493.  
  494.     return 0;
  495. }
  496.  
  497. static void i830_write_entry(dma_addr_t addr, unsigned int entry,
  498.                              unsigned int flags)
  499. {
  500.         u32 pte_flags = I810_PTE_VALID;
  501.  
  502.         if (flags ==  AGP_USER_CACHED_MEMORY)
  503.                 pte_flags |= I830_PTE_SYSTEM_CACHED;
  504.  
  505.         writel(addr | pte_flags, intel_private.gtt + entry);
  506. }
  507.  
/*
 * Enable GTT translation and verify it stuck.  On gen2 the GMCH enable
 * bit is set (and read back to confirm); on gen2-5 the saved PGETBL_CTL
 * value is restored and, where the chipset has an enable bit, verified.
 * gen6+ needs none of this and returns true immediately.
 * Returns true on success, false if the hardware refused to enable.
 */
bool intel_enable_gtt(void)
{
    u8 __iomem *reg;

    if (INTEL_GTT_GEN >= 6)
        return true;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                      I830_GMCH_CTRL, gmch_ctrl);

        /* Read back to check the enable bit actually latched. */
        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
                        dev_err(&intel_private.pcidev->dev,
                                "failed to enable the GTT: GMCH_CTRL=%x\n",
                gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    reg = intel_private.registers+I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
                dev_err(&intel_private.pcidev->dev,
                        "failed to enable the GTT: PGETBL=%x [expected %x]\n",
            readl(reg), intel_private.PGETBL_save);
        return false;
    }

    /* Flush again after the PGETBL update. */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    return true;
}
  554.  
  555. static bool i830_check_flags(unsigned int flags)
  556. {
  557.         switch (flags) {
  558.         case 0:
  559.         case AGP_PHYS_MEMORY:
  560.         case AGP_USER_CACHED_MEMORY:
  561.         case AGP_USER_MEMORY:
  562.                 return true;
  563.         }
  564.  
  565.         return false;
  566. }
  567.  
  568. void intel_gtt_insert_sg_entries(struct pagelist *st,
  569.                                  unsigned int pg_start,
  570.                                  unsigned int flags)
  571. {
  572.     int i, j;
  573.  
  574.         j = pg_start;
  575.  
  576.     for(i = 0; i < st->nents; i++)
  577.     {
  578.         dma_addr_t addr = st->page[i];
  579.         intel_private.driver->write_entry(addr, j, flags);
  580.         j++;
  581.     };
  582.  
  583.         readl(intel_private.gtt+j-1);
  584. }
  585.  
  586. static void intel_gtt_insert_pages(unsigned int first_entry,
  587.                                    unsigned int num_entries,
  588.                    dma_addr_t *pages,
  589.                                    unsigned int flags)
  590. {
  591.     int i, j;
  592.  
  593.     for (i = 0, j = first_entry; i < num_entries; i++, j++) {
  594.         dma_addr_t addr = pages[i];
  595.         intel_private.driver->write_entry(addr,
  596.                           j, flags);
  597.     }
  598.     readl(intel_private.gtt+j-1);
  599. }
  600.  
  601.  
  602. void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
  603. {
  604.         unsigned int i;
  605.  
  606.         for (i = first_entry; i < (first_entry + num_entries); i++) {
  607.                 intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
  608.                                                   i, 0);
  609.         }
  610.         readl(intel_private.gtt+i-1);
  611. }
  612.  
/*
 * Set up the chipset flush page for 9xx-class hardware.  In this port
 * the resource discovery and mapping are disabled (commented out), so
 * i9xx_flush_page stays NULL and chipset flushing is unavailable; the
 * function only logs that fact.  gen6 does not need a flush page.
 */
static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    if (INTEL_GTT_GEN == 6)
        return;

    /* setup a resource for this object */
//    intel_private.ifp_resource.name = "Intel Flush Page";
//    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    intel_private.resource_valid = 0;

    /* Setup chipset flush for 915 */
//    if (IS_G33 || INTEL_GTT_GEN >= 4) {
//        intel_i965_g33_setup_chipset_flush();
//    } else {
//        intel_i915_setup_chipset_flush();
//    }

//    if (intel_private.ifp_resource.start)
//        intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
            "can't ioremap flush page - no chipset flushing\n");
}
  641.  
  642. static void i9xx_cleanup(void)
  643. {
  644.         if (intel_private.i9xx_flush_page)
  645.                 iounmap(intel_private.i9xx_flush_page);
  646. //      if (intel_private.resource_valid)
  647. //              release_resource(&intel_private.ifp_resource);
  648.         intel_private.ifp_resource.start = 0;
  649.         intel_private.resource_valid = 0;
  650. }
  651.  
  652. static void i9xx_chipset_flush(void)
  653. {
  654.     if (intel_private.i9xx_flush_page)
  655.         writel(1, intel_private.i9xx_flush_page);
  656. }
  657.  
  658. static void i965_write_entry(dma_addr_t addr,
  659.                              unsigned int entry,
  660.                              unsigned int flags)
  661. {
  662.         u32 pte_flags;
  663.  
  664.         pte_flags = I810_PTE_VALID;
  665.         if (flags == AGP_USER_CACHED_MEMORY)
  666.                 pte_flags |= I830_PTE_SYSTEM_CACHED;
  667.  
  668.         /* Shift high bits down */
  669.         addr |= (addr >> 28) & 0xf0;
  670.         writel(addr | pte_flags, intel_private.gtt + entry);
  671. }
  672.  
  673. static bool gen6_check_flags(unsigned int flags)
  674. {
  675.     return true;
  676. }
  677.  
  678. static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
  679.                                 unsigned int flags)
  680. {
  681.         unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
  682.         unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
  683.         u32 pte_flags;
  684.  
  685.         if (type_mask == AGP_USER_MEMORY)
  686.                 pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
  687.         else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
  688.                 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
  689.                 if (gfdt)
  690.                         pte_flags |= GEN6_PTE_GFDT;
  691.         } else { /* set 'normal'/'cached' to LLC by default */
  692.                 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
  693.                 if (gfdt)
  694.                         pte_flags |= GEN6_PTE_GFDT;
  695.         }
  696.  
  697.         /* gen6 has bit11-4 for physical addr bit39-32 */
  698.         addr |= (addr >> 28) & 0xff0;
  699.         writel(addr | pte_flags, intel_private.gtt + entry);
  700. }
  701.  
  702. static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
  703.                  unsigned int flags)
  704. {
  705.     unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
  706.     unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
  707.     u32 pte_flags;
  708.  
  709.     if (type_mask == AGP_USER_MEMORY)
  710.         pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
  711.     else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
  712.         pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
  713.         if (gfdt)
  714.             pte_flags |= GEN6_PTE_GFDT;
  715.     } else { /* set 'normal'/'cached' to LLC by default */
  716.         pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
  717.         if (gfdt)
  718.             pte_flags |= GEN6_PTE_GFDT;
  719.     }
  720.  
  721.     /* gen6 has bit11-4 for physical addr bit39-32 */
  722.     addr |= (addr >> 28) & 0xff0;
  723.     writel(addr | pte_flags, intel_private.gtt + entry);
  724. }
  725.  
  726. static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
  727.                                    unsigned int flags)
  728. {
  729.         unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
  730.         unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
  731.         u32 pte_flags;
  732.  
  733.         if (type_mask == AGP_USER_MEMORY)
  734.                 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
  735.         else {
  736.                 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
  737.                 if (gfdt)
  738.                         pte_flags |= GEN6_PTE_GFDT;
  739.         }
  740.  
  741.         /* gen6 has bit11-4 for physical addr bit39-32 */
  742.         addr |= (addr >> 28) & 0xff0;
  743.         writel(addr | pte_flags, intel_private.gtt + entry);
  744.  
  745.         writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
  746. }
  747.  
/* gen6 setup keeps no state beyond what the generic code unmaps: nothing
 * to undo here. */
static void gen6_cleanup(void)
{
}
  751.  
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
        const unsigned short gpu_devid = intel_private.pcidev->device;

        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
        if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
             gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
             intel_iommu_gfx_mapped)
                return 1;
#endif
        /* Without CONFIG_INTEL_IOMMU the workaround is never needed. */
        return 0;
}
  770.  
/*
 * Chipset setup shared by all 9xx-class (gen3+) hardware: map the MMIO
 * register file from the MMADR BAR and locate the GTT.  On gen3 the GTT
 * has its own BAR (PTEADDR); on gen4+ it lives at a fixed offset inside
 * the register BAR.  Returns 0 on success, -ENOMEM if the register file
 * could not be mapped.
 */
static int i9xx_setup(void)
{
    u32 reg_addr;
        int size = KB(512);

    pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

    /* Mask off the BAR's low control/flag bits. */
    reg_addr &= 0xfff80000;

        /* gen7 has a larger register file. */
        if (INTEL_GTT_GEN >= 7)
                size = MB(2);

        intel_private.registers = ioremap(reg_addr, size);
    if (!intel_private.registers)
        return -ENOMEM;

    if (INTEL_GTT_GEN == 3) {
        u32 gtt_addr;

        pci_read_config_dword(intel_private.pcidev,
                      I915_PTEADDR, &gtt_addr);
        intel_private.gtt_bus_addr = gtt_addr;
    } else {
        u32 gtt_offset;

        switch (INTEL_GTT_GEN) {
        case 5:
        case 6:
                case 7:
            gtt_offset = MB(2);
            break;
        case 4:
        default:
            gtt_offset =  KB(512);
            break;
        }
        intel_private.gtt_bus_addr = reg_addr + gtt_offset;
    }

        if (needs_idle_maps())
                intel_private.base.do_idle_maps = 1;

    intel_i9xx_setup_flush();

    return 0;
}
  817.  
/* gen3 (i915-class) chipsets. */
static const struct intel_gtt_driver i915_gtt_driver = {
        .gen = 3,
        .has_pgtbl_enable = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        /* i945 is the last gpu to need phys mem (for overlay and cursors). */
        .write_entry = i830_write_entry,
        .dma_mask_size = 32,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* G33 family: gen3 register layout with gen4-style PTEs and sizing. */
static const struct intel_gtt_driver g33_gtt_driver = {
        .gen = 3,
        .is_g33 = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* Pineview: treated as a G33 variant. */
static const struct intel_gtt_driver pineview_gtt_driver = {
        .gen = 3,
        .is_pineview = 1, .is_g33 = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* gen4 (i965-class) chipsets. */
static const struct intel_gtt_driver i965_gtt_driver = {
        .gen = 4,
        .has_pgtbl_enable = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* gen5 G4x chipsets. */
static const struct intel_gtt_driver g4x_gtt_driver = {
        .gen = 5,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* gen5 Ironlake: flagged so the VT-d idle-maps workaround can apply. */
static const struct intel_gtt_driver ironlake_gtt_driver = {
        .gen = 5,
        .is_ironlake = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* gen6 SandyBridge. */
static const struct intel_gtt_driver sandybridge_gtt_driver = {
    .gen = 6,
    .setup = i9xx_setup,
    .cleanup = gen6_cleanup,
    .write_entry = gen6_write_entry,
    .dma_mask_size = 40,
    .check_flags = gen6_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
/* Haswell: gen6-style driver with its own PTE encoding. */
static const struct intel_gtt_driver haswell_gtt_driver = {
        .gen = 6,
        .setup = i9xx_setup,
        .cleanup = gen6_cleanup,
        .write_entry = haswell_write_entry,
        .dma_mask_size = 40,
        .check_flags = gen6_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* gen7 Valleyview.
 * NOTE(review): .chipset_flush is left NULL here, unlike every other
 * table -- verify no caller invokes driver->chipset_flush on VLV. */
static const struct intel_gtt_driver valleyview_gtt_driver = {
        .gen = 7,
        .setup = i9xx_setup,
        .cleanup = gen6_cleanup,
        .write_entry = valleyview_write_entry,
        .dma_mask_size = 40,
        .check_flags = gen6_check_flags,
};
  904.  
/* Table of supported Intel GMCH chipsets.  intel_gmch_probe() walks this
 * table and, for the first entry whose gmch_chip_id matches a present PCI
 * device, selects the corresponding gtt_driver.
 */
static const struct intel_gtt_driver_description {
    unsigned int gmch_chip_id;                  /* PCI device id of the integrated graphics device */
    char *name;                                 /* chipset name, printed by intel_gmch_probe() */
    const struct intel_gtt_driver *gtt_driver;  /* per-generation GTT operations */
} intel_gtt_chipsets[] = {
        { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
                &pineview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
                &pineview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
            "HD Graphics", &ironlake_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", &ironlake_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
        "Sandybridge", &sandybridge_gtt_driver },
        /* Ivybridge reuses the Sandybridge GTT driver. */
        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
            "Ivybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
            "Ivybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
            "Ivybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
            "Ivybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
            "Ivybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
            "Ivybridge", &sandybridge_gtt_driver },
        { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
            "ValleyView", &valleyview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
            "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
            "Haswell", &haswell_gtt_driver },
    { 0, NULL, NULL } /* sentinel: terminates the probe loop */
};
  1068.  
  1069. static int find_gmch(u16 device)
  1070. {
  1071.     struct pci_dev *gmch_device;
  1072.  
  1073.     gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
  1074.     if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
  1075.         gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
  1076.                          device, gmch_device);
  1077.     }
  1078.  
  1079.     if (!gmch_device)
  1080.         return 0;
  1081.  
  1082.     intel_private.pcidev = gmch_device;
  1083.     return 1;
  1084. }
  1085.  
  1086. int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
  1087.                       struct agp_bridge_data *bridge)
  1088. {
  1089.     int i, mask;
  1090.     intel_private.driver = NULL;
  1091.  
  1092.     for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
  1093.         if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
  1094.             intel_private.driver =
  1095.                 intel_gtt_chipsets[i].gtt_driver;
  1096.             break;
  1097.         }
  1098.     }
  1099.  
  1100.     if (!intel_private.driver)
  1101.         return 0;
  1102.  
  1103.         if (bridge) {
  1104.                 bridge->dev_private_data = &intel_private;
  1105.                 bridge->dev = bridge_pdev;
  1106.         }
  1107.  
  1108.     intel_private.bridge_dev = bridge_pdev;
  1109.  
  1110.     dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);
  1111.  
  1112.     mask = intel_private.driver->dma_mask_size;
  1113. //    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
  1114. //        dev_err(&intel_private.pcidev->dev,
  1115. //            "set gfx device dma mask %d-bit failed!\n", mask);
  1116. //    else
  1117. //        pci_set_consistent_dma_mask(intel_private.pcidev,
  1118. //                        DMA_BIT_MASK(mask));
  1119.  
  1120.         if (intel_gtt_init() != 0) {
  1121. //              intel_gmch_remove();
  1122.  
  1123.         return 0;
  1124.         }
  1125.  
  1126.     return 1;
  1127. }
  1128. EXPORT_SYMBOL(intel_gmch_probe);
  1129.  
  1130. const struct intel_gtt *intel_gtt_get(void)
  1131. {
  1132.     return &intel_private.base;
  1133. }
  1134. EXPORT_SYMBOL(intel_gtt_get);
  1135.  
  1136. void intel_gtt_chipset_flush(void)
  1137. {
  1138.         if (intel_private.driver->chipset_flush)
  1139.                 intel_private.driver->chipset_flush();
  1140. }
  1141. EXPORT_SYMBOL(intel_gtt_chipset_flush);
  1142.  
  1143.  
  1144. //phys_addr_t get_bus_addr(void)
  1145. //{
  1146. //    return intel_private.gma_bus_addr;
  1147. //};
  1148.