Subversion Repositories Kolibri OS

Rev

Rev 2344 | Rev 3037 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Intel GTT (Graphics Translation Table) routines
  3.  *
  4.  * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
  6.  * old userspace intel graphics drivers needed an interface to map memory into
  7.  * the GTT. And the drm provides a default interface for graphic devices sitting
  8.  * on an agp port. So it made sense to fake the GTT support as an agp port to
  9.  * avoid having to create a new api.
  10.  *
  11.  * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
  13.  * here.
  14.  *
  15.  * /fairy-tale-mode off
  16.  */
  17.  
  18. #include <linux/module.h>
  19. #include <errno-base.h>
  20. #include <linux/pci.h>
  21. #include <linux/kernel.h>
  22. #include <linux/export.h>
  23. //#include <linux/pagemap.h>
  24. //#include <linux/agp_backend.h>
  25. //#include <asm/smp.h>
  26. #include <linux/spinlock.h>
  27. #include "agp.h"
  28. #include "intel-agp.h"
  29. #include "intel-gtt.h"
  30.  
  31. #include <syscall.h>
  32.  
  33. struct pci_dev *
  34. pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
  35.  
  36.  
  37. #define PCI_VENDOR_ID_INTEL             0x8086
  38. #define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
  39. #define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
  40. #define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
  41. #define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
  42. #define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
  43. #define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2
  44.  
  45.  
  46. #define AGP_NORMAL_MEMORY 0
  47.  
  48. #define AGP_USER_TYPES (1 << 16)
  49. #define AGP_USER_MEMORY (AGP_USER_TYPES)
  50. #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
  51.  
  52.  
  53.  
  54. /*
  55.  * If we have Intel graphics, we're not going to have anything other than
  56.  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
  57.  * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
  58.  * Only newer chipsets need to bother with this, of course.
  59.  */
  60. #ifdef CONFIG_INTEL_IOMMU
  61. #define USE_PCI_DMA_API 1
  62. #else
  63. #define USE_PCI_DMA_API 0
  64. #endif
  65.  
/* Per-generation chipset vtable: everything the generic GTT code needs
 * to know about one family of Intel graphics chipsets. */
struct intel_gtt_driver {
    unsigned int gen : 8;               /* hardware generation (1..7) */
    unsigned int is_g33 : 1;            /* G33-family GTT layout */
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1;  /* PGETBL_CTL has an enable bit to manage */
    unsigned int dma_mask_size : 8;     /* width of the device dma mask, in bits */
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    /* Encode one PTE for this chipset and write it at index 'entry'. */
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    void (*chipset_flush)(void);
};
  85.  
/* Single global driver state; this file only ever manages one GMCH. */
static struct _intel_private {
    struct intel_gtt base;              /* the part exported via intel-gtt.h */
    const struct intel_gtt_driver *driver;  /* chipset vtable, set at probe */
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;         /* host bridge carrying GMCH config regs */
    u8 __iomem *registers;              /* mapped mmio register file */
    phys_addr_t gtt_bus_addr;           /* bus address of the GTT itself */
    u32 PGETBL_save;                    /* PGETBL_CTL value to restore on enable/resume */
    u32 __iomem *gtt;       /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;      /* mapped chipset flush page, may be NULL */
    char *i81x_gtt_table;
    struct resource ifp_resource;       /* flush page resource (unused on KolibriOS) */
    int resource_valid;
    struct page *scratch_page;          /* always NULL here; see setup_scratch_page */
        int refcount;
} intel_private;
  104.  
  105. #define INTEL_GTT_GEN   intel_private.driver->gen
  106. #define IS_G33          intel_private.driver->is_g33
  107. #define IS_PINEVIEW     intel_private.driver->is_pineview
  108. #define IS_IRONLAKE     intel_private.driver->is_ironlake
  109. #define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable
  110.  
  111. static int intel_gtt_setup_scratch_page(void)
  112. {
  113.         dma_addr_t dma_addr;
  114.  
  115.     dma_addr = AllocPage();
  116.     if (dma_addr == 0)
  117.         return -ENOMEM;
  118.  
  119.     intel_private.base.scratch_page_dma = dma_addr;
  120.     intel_private.scratch_page = NULL;
  121.  
  122.     return 0;
  123. }
  124.  
/* Read the GMCH control register(s) and decode how much main memory the
 * BIOS "stole" for graphics. Returns the size in bytes, 0 if none (or on
 * i81x, which has no stolen memory at all). */
static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;      /* set when i830 uses local (on-card) memory */
    static const int ddt[4] = { 0, 16, 32, 64 };
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctrl);

    /* i830/845G use the original GMS encoding in the bridge's GMCH_CTRL. */
    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            /* local memory: size = (#channels) * (device density) */
            rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else if (INTEL_GTT_GEN == 6) {
        /*
         * SandyBridge has new memory control reg at 0x50.w
         */
        u16 snb_gmch_ctl;
        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
        case SNB_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case SNB_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case SNB_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case SNB_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case SNB_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case SNB_GMCH_GMS_STOLEN_192M:
            stolen_size = MB(192);
            break;
        case SNB_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case SNB_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case SNB_GMCH_GMS_STOLEN_288M:
            stolen_size = MB(288);
            break;
        case SNB_GMCH_GMS_STOLEN_320M:
            stolen_size = MB(320);
            break;
        case SNB_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        case SNB_GMCH_GMS_STOLEN_384M:
            stolen_size = MB(384);
            break;
        case SNB_GMCH_GMS_STOLEN_416M:
            stolen_size = MB(416);
            break;
        case SNB_GMCH_GMS_STOLEN_448M:
            stolen_size = MB(448);
            break;
        case SNB_GMCH_GMS_STOLEN_480M:
            stolen_size = MB(480);
            break;
        case SNB_GMCH_GMS_STOLEN_512M:
            stolen_size = MB(512);
            break;
        }
    } else {
        /* i855 through gen5 share the I855 GMS encoding. */
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
                dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
               stolen_size / KB(1), local ? "local" : "stolen");
    } else {
                dev_info(&intel_private.bridge_dev->dev,
                       "no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}
  275.  
  276. static void i965_adjust_pgetbl_size(unsigned int size_flag)
  277. {
  278.     u32 pgetbl_ctl, pgetbl_ctl2;
  279.  
  280.     /* ensure that ppgtt is disabled */
  281.     pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
  282.     pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
  283.     writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
  284.  
  285.     /* write the new ggtt size */
  286.     pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
  287.     pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
  288.     pgetbl_ctl |= size_flag;
  289.     writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
  290. }
  291.  
/* Return the number of PTEs in the global GTT for gen4/5 (and G33).
 * On gen5 the GTT may first be resized according to the GMCH control
 * register before reading the size back from PGETBL_CTL. */
static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        /* gen5: the BIOS-programmed GMCH size decides the ggtt size;
         * push it into PGETBL_CTL before reading it back below. */
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
                dev_info(&intel_private.pcidev->dev,
                         "unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    /* table bytes / 4 bytes per pte = number of entries */
    return size/4;
}
  347.  
/* Total number of PTEs in the global GTT, dispatched by generation:
 * gen4/5/G33 read it from PGETBL_CTL, gen6 from the SNB GMCH control,
 * and older parts simply have one pte per mappable aperture page. */
static unsigned int intel_gtt_total_entries(void)
{
    int size;

    if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
        return i965_gtt_total_entries();
    else if (INTEL_GTT_GEN == 6) {
        u16 snb_gmch_ctl;

        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
        default:
        case SNB_GTT_SIZE_0M:
            printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
            size = MB(0);
            break;
        case SNB_GTT_SIZE_1M:
            size = MB(1);
            break;
        case SNB_GTT_SIZE_2M:
            size = MB(2);
            break;
        }
        /* table bytes / 4 bytes per pte */
        return size/4;
    } else {
        /* On previous hardware, the GTT size was just what was
         * required to map the aperture.
         */
        return intel_private.base.gtt_mappable_entries;
    }
}
  379.  
  380. static unsigned int intel_gtt_mappable_entries(void)
  381. {
  382.     unsigned int aperture_size;
  383.  
  384.     if (INTEL_GTT_GEN == 1) {
  385.         u32 smram_miscc;
  386.  
  387.         pci_read_config_dword(intel_private.bridge_dev,
  388.                       I810_SMRAM_MISCC, &smram_miscc);
  389.  
  390.         if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
  391.                 == I810_GFX_MEM_WIN_32M)
  392.             aperture_size = MB(32);
  393.         else
  394.             aperture_size = MB(64);
  395.     } else if (INTEL_GTT_GEN == 2) {
  396.         u16 gmch_ctrl;
  397.  
  398.         pci_read_config_word(intel_private.bridge_dev,
  399.                      I830_GMCH_CTRL, &gmch_ctrl);
  400.  
  401.         if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
  402.             aperture_size = MB(64);
  403.         else
  404.             aperture_size = MB(128);
  405.     } else {
  406.         /* 9xx supports large sizes, just look at the length */
  407.         aperture_size = pci_resource_len(intel_private.pcidev, 2);
  408.     }
  409.  
  410.     return aperture_size >> PAGE_SHIFT;
  411. }
  412.  
/* Counterpart of intel_gtt_setup_scratch_page().
 * NOTE(review): the page from AllocPage() is intentionally not freed on
 * KolibriOS (the call below is commented out) — the scratch page lives
 * for the whole driver lifetime; confirm before re-enabling the free. */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
  417.  
/* Full teardown: chipset-specific cleanup first, then unmap the GTT and
 * the mmio register file, finally release the scratch page. Order matters:
 * ->cleanup() may still touch the mapped registers. */
static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

        iounmap(intel_private.gtt);
        iounmap(intel_private.registers);

        intel_gtt_teardown_scratch_page();
}
  427.  
  428. static int intel_gtt_init(void)
  429. {
  430.         u32 gma_addr;
  431.     u32 gtt_map_size;
  432.     int ret;
  433.  
  434.     ENTER();
  435.  
  436.     ret = intel_private.driver->setup();
  437.     if (ret != 0)
  438.     {
  439.         LEAVE();
  440.         return ret;
  441.     };
  442.  
  443.  
  444.     intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
  445.     intel_private.base.gtt_total_entries = intel_gtt_total_entries();
  446.  
  447.     /* save the PGETBL reg for resume */
  448.     intel_private.PGETBL_save =
  449.         readl(intel_private.registers+I810_PGETBL_CTL)
  450.             & ~I810_PGETBL_ENABLED;
  451.     /* we only ever restore the register when enabling the PGTBL... */
  452.     if (HAS_PGTBL_EN)
  453.         intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
  454.  
  455.         dev_info(&intel_private.bridge_dev->dev,
  456.                         "detected gtt size: %dK total, %dK mappable\n",
  457.             intel_private.base.gtt_total_entries * 4,
  458.             intel_private.base.gtt_mappable_entries * 4);
  459.  
  460.     gtt_map_size = intel_private.base.gtt_total_entries * 4;
  461.  
  462.         intel_private.gtt = NULL;
  463. //   if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
  464. //       intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
  465. //                          gtt_map_size);
  466.         if (intel_private.gtt == NULL)
  467.                 intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
  468.                                             gtt_map_size);
  469.         if (intel_private.gtt == NULL) {
  470.         intel_private.driver->cleanup();
  471.                 iounmap(intel_private.registers);
  472.         return -ENOMEM;
  473.     }
  474.         intel_private.base.gtt = intel_private.gtt;
  475.  
  476.     asm volatile("wbinvd");
  477.  
  478.     intel_private.base.stolen_size = intel_gtt_stolen_size();
  479.  
  480.     intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
  481.  
  482.     ret = intel_gtt_setup_scratch_page();
  483.     if (ret != 0) {
  484.         intel_gtt_cleanup();
  485.         return ret;
  486.     }
  487.  
  488.         if (INTEL_GTT_GEN <= 2)
  489.                 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
  490.                                       &gma_addr);
  491.         else
  492.                 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
  493.                                       &gma_addr);
  494.  
  495.         intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
  496.  
  497.     LEAVE();
  498.  
  499.     return 0;
  500. }
  501.  
  502. static void i830_write_entry(dma_addr_t addr, unsigned int entry,
  503.                              unsigned int flags)
  504. {
  505.         u32 pte_flags = I810_PTE_VALID;
  506.  
  507.         if (flags ==  AGP_USER_CACHED_MEMORY)
  508.                 pte_flags |= I830_PTE_SYSTEM_CACHED;
  509.  
  510.         writel(addr | pte_flags, intel_private.gtt + entry);
  511. }
  512.  
/* Turn the GTT on (also used on the resume path) and verify it stuck.
 * Returns true on success. The register sequence is order-sensitive:
 * flush chipset write buffers both before and after restoring PGETBL. */
bool intel_enable_gtt(void)
{
    u8 __iomem *reg;

    /* gen6+ has no enable bit to manage; always on */
    if (INTEL_GTT_GEN >= 6)
        return true;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        /* set the GMCH enable bit ... */
        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                      I830_GMCH_CTRL, gmch_ctrl);

        /* ... and read it back to confirm the hardware accepted it */
        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
                        dev_err(&intel_private.pcidev->dev,
                                "failed to enable the GTT: GMCH_CTRL=%x\n",
                gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    reg = intel_private.registers+I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
                dev_err(&intel_private.pcidev->dev,
                        "failed to enable the GTT: PGETBL=%x [expected %x]\n",
            readl(reg), intel_private.PGETBL_save);
        return false;
    }

    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    return true;
}
  559.  
  560. static bool i830_check_flags(unsigned int flags)
  561. {
  562.         switch (flags) {
  563.         case 0:
  564.         case AGP_PHYS_MEMORY:
  565.         case AGP_USER_CACHED_MEMORY:
  566.         case AGP_USER_MEMORY:
  567.                 return true;
  568.         }
  569.  
  570.         return false;
  571. }
  572.  
  573. void intel_gtt_insert_sg_entries(struct pagelist *st,
  574.                                  unsigned int pg_start,
  575.                                  unsigned int flags)
  576. {
  577.     int i, j;
  578.  
  579.         j = pg_start;
  580.  
  581.     for(i = 0; i < st->nents; i++)
  582.     {
  583.         dma_addr_t addr = st->page[i];
  584.         intel_private.driver->write_entry(addr, j, flags);
  585.         j++;
  586.     };
  587.  
  588.         readl(intel_private.gtt+j-1);
  589. }
  590.  
  591. static void intel_gtt_insert_pages(unsigned int first_entry,
  592.                                    unsigned int num_entries,
  593.                    dma_addr_t *pages,
  594.                                    unsigned int flags)
  595. {
  596.     int i, j;
  597.  
  598.     for (i = 0, j = first_entry; i < num_entries; i++, j++) {
  599.         dma_addr_t addr = pages[i];
  600.         intel_private.driver->write_entry(addr,
  601.                           j, flags);
  602.     }
  603.     readl(intel_private.gtt+j-1);
  604. }
  605.  
  606.  
  607. void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
  608. {
  609.         unsigned int i;
  610.  
  611.         for (i = first_entry; i < (first_entry + num_entries); i++) {
  612.                 intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
  613.                                                   i, 0);
  614.         }
  615.         readl(intel_private.gtt+i-1);
  616. }
  617.  
/* Locate and map the chipset flush page for i9xx parts. On KolibriOS the
 * resource discovery is stubbed out, so i9xx_flush_page normally stays
 * NULL and chipset flushes become no-ops (see i9xx_chipset_flush). */
static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    /* gen6 flushes through GFX_FLSH_CNTL instead of a flush page */
    if (INTEL_GTT_GEN == 6)
        return;

    /* setup a resource for this object */
//    intel_private.ifp_resource.name = "Intel Flush Page";
//    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    intel_private.resource_valid = 0;

    /* Setup chipset flush for 915 */
//    if (IS_G33 || INTEL_GTT_GEN >= 4) {
//        intel_i965_g33_setup_chipset_flush();
//    } else {
//        intel_i915_setup_chipset_flush();
//    }

//    if (intel_private.ifp_resource.start)
//        intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
            "can't ioremap flush page - no chipset flushing\n");
}
  646.  
/* Undo intel_i9xx_setup_flush(): unmap the flush page (if it was ever
 * mapped) and reset the resource bookkeeping. Resource release itself is
 * stubbed out on KolibriOS. */
static void i9xx_cleanup(void)
{
        if (intel_private.i9xx_flush_page)
                iounmap(intel_private.i9xx_flush_page);
//      if (intel_private.resource_valid)
//              release_resource(&intel_private.ifp_resource);
        intel_private.ifp_resource.start = 0;
        intel_private.resource_valid = 0;
}
  656.  
  657. static void i9xx_chipset_flush(void)
  658. {
  659.     if (intel_private.i9xx_flush_page)
  660.         writel(1, intel_private.i9xx_flush_page);
  661. }
  662.  
  663. static void i965_write_entry(dma_addr_t addr,
  664.                              unsigned int entry,
  665.                              unsigned int flags)
  666. {
  667.         u32 pte_flags;
  668.  
  669.         pte_flags = I810_PTE_VALID;
  670.         if (flags == AGP_USER_CACHED_MEMORY)
  671.                 pte_flags |= I830_PTE_SYSTEM_CACHED;
  672.  
  673.         /* Shift high bits down */
  674.         addr |= (addr >> 28) & 0xf0;
  675.         writel(addr | pte_flags, intel_private.gtt + entry);
  676. }
  677.  
  678. static bool gen6_check_flags(unsigned int flags)
  679. {
  680.     return true;
  681. }
  682.  
  683. static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
  684.                                 unsigned int flags)
  685. {
  686.         unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
  687.         unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
  688.         u32 pte_flags;
  689.  
  690.         if (type_mask == AGP_USER_MEMORY)
  691.                 pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
  692.         else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
  693.                 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
  694.                 if (gfdt)
  695.                         pte_flags |= GEN6_PTE_GFDT;
  696.         } else { /* set 'normal'/'cached' to LLC by default */
  697.                 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
  698.                 if (gfdt)
  699.                         pte_flags |= GEN6_PTE_GFDT;
  700.         }
  701.  
  702.         /* gen6 has bit11-4 for physical addr bit39-32 */
  703.         addr |= (addr >> 28) & 0xff0;
  704.         writel(addr | pte_flags, intel_private.gtt + entry);
  705. }
  706.  
  707. static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
  708.                  unsigned int flags)
  709. {
  710.     unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
  711.     unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
  712.     u32 pte_flags;
  713.  
  714.     if (type_mask == AGP_USER_MEMORY)
  715.         pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
  716.     else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
  717.         pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
  718.         if (gfdt)
  719.             pte_flags |= GEN6_PTE_GFDT;
  720.     } else { /* set 'normal'/'cached' to LLC by default */
  721.         pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
  722.         if (gfdt)
  723.             pte_flags |= GEN6_PTE_GFDT;
  724.     }
  725.  
  726.     /* gen6 has bit11-4 for physical addr bit39-32 */
  727.     addr |= (addr >> 28) & 0xff0;
  728.     writel(addr | pte_flags, intel_private.gtt + entry);
  729. }
  730.  
  731. static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
  732.                                    unsigned int flags)
  733. {
  734.         unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
  735.         unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
  736.         u32 pte_flags;
  737.  
  738.         if (type_mask == AGP_USER_MEMORY)
  739.                 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
  740.         else {
  741.                 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
  742.                 if (gfdt)
  743.                         pte_flags |= GEN6_PTE_GFDT;
  744.         }
  745.  
  746.         /* gen6 has bit11-4 for physical addr bit39-32 */
  747.         addr |= (addr >> 28) & 0xff0;
  748.         writel(addr | pte_flags, intel_private.gtt + entry);
  749.  
  750.         writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
  751. }
  752.  
/* Nothing to undo for gen6: i9xx_setup() only maps the register file,
 * which the generic cleanup path unmaps itself. */
static void gen6_cleanup(void)
{
}
  756.  
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 * Returns 1 when the Ironlake-mobile + intel_iommu workaround applies,
 * 0 otherwise (always 0 when built without CONFIG_INTEL_IOMMU).
 */
static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
        const unsigned short gpu_devid = intel_private.pcidev->device;

        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
        if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
             gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
             intel_iommu_gfx_mapped)
                return 1;
#endif
        return 0;
}
  775.  
/* Chipset ->setup() shared by all gen3+ parts: map the mmio register
 * BAR and work out the bus address of the GTT, which lives either behind
 * its own PCI register (gen3) or at a fixed offset inside the register
 * BAR (gen4+). Returns 0 on success, -ENOMEM if the mmio map fails. */
static int i9xx_setup(void)
{
    u32 reg_addr;
        int size = KB(512);

    pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

    /* mask off the BAR's low flag/offset bits */
    reg_addr &= 0xfff80000;

        /* gen7 register files are larger */
        if (INTEL_GTT_GEN >= 7)
                size = MB(2);

        intel_private.registers = ioremap(reg_addr, size);
    if (!intel_private.registers)
        return -ENOMEM;

    if (INTEL_GTT_GEN == 3) {
        /* gen3 exposes the GTT base in its own config register */
        u32 gtt_addr;

        pci_read_config_dword(intel_private.pcidev,
                      I915_PTEADDR, &gtt_addr);
        intel_private.gtt_bus_addr = gtt_addr;
    } else {
        /* gen4+: the GTT sits at a fixed offset in the register BAR */
        u32 gtt_offset;

        switch (INTEL_GTT_GEN) {
        case 5:
        case 6:
                case 7:
            gtt_offset = MB(2);
            break;
        case 4:
        default:
            gtt_offset =  KB(512);
            break;
        }
        intel_private.gtt_bus_addr = reg_addr + gtt_offset;
    }

        if (needs_idle_maps())
                intel_private.base.do_idle_maps = 1;

    intel_i9xx_setup_flush();

    return 0;
}
  822.  
  823. static const struct intel_gtt_driver i915_gtt_driver = {
  824.         .gen = 3,
  825.         .has_pgtbl_enable = 1,
  826.         .setup = i9xx_setup,
  827.         .cleanup = i9xx_cleanup,
  828.         /* i945 is the last gpu to need phys mem (for overlay and cursors). */
  829.         .write_entry = i830_write_entry,
  830.         .dma_mask_size = 32,
  831.         .check_flags = i830_check_flags,
  832.         .chipset_flush = i9xx_chipset_flush,
  833. };
  834. static const struct intel_gtt_driver g33_gtt_driver = {
  835.         .gen = 3,
  836.         .is_g33 = 1,
  837.         .setup = i9xx_setup,
  838.         .cleanup = i9xx_cleanup,
  839.         .write_entry = i965_write_entry,
  840.         .dma_mask_size = 36,
  841.         .check_flags = i830_check_flags,
  842.         .chipset_flush = i9xx_chipset_flush,
  843. };
  844. static const struct intel_gtt_driver pineview_gtt_driver = {
  845.         .gen = 3,
  846.         .is_pineview = 1, .is_g33 = 1,
  847.         .setup = i9xx_setup,
  848.         .cleanup = i9xx_cleanup,
  849.         .write_entry = i965_write_entry,
  850.         .dma_mask_size = 36,
  851.         .check_flags = i830_check_flags,
  852.         .chipset_flush = i9xx_chipset_flush,
  853. };
/* Gen4 (i965 class) driver: software-enabled page table, 36-bit DMA mask. */
static const struct intel_gtt_driver i965_gtt_driver = {
        .gen = 4,
        .has_pgtbl_enable = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* G4x family (GM45/Eaglelake/Q45/G45/B43/G41): gen 5 GTT layout. */
static const struct intel_gtt_driver g4x_gtt_driver = {
        .gen = 5,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* Ironlake: same gen-5 layout as g4x but flagged for Ironlake quirks. */
static const struct intel_gtt_driver ironlake_gtt_driver = {
        .gen = 5,
        .is_ironlake = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* Sandybridge/Ivybridge (gen 6 paths): gen6 entry format and cleanup,
 * 40-bit DMA mask. */
static const struct intel_gtt_driver sandybridge_gtt_driver = {
    .gen = 6,
    .setup = i9xx_setup,
    .cleanup = gen6_cleanup,
    .write_entry = gen6_write_entry,
    .dma_mask_size = 40,
    .check_flags = gen6_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
/* Haswell: gen6-based driver with a Haswell-specific entry writer. */
static const struct intel_gtt_driver haswell_gtt_driver = {
        .gen = 6,
        .setup = i9xx_setup,
        .cleanup = gen6_cleanup,
        .write_entry = haswell_write_entry,
        .dma_mask_size = 40,
        .check_flags = gen6_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
/* Valleyview: no .chipset_flush on purpose — intel_gtt_chipset_flush()
 * NULL-checks the hook before calling it. */
static const struct intel_gtt_driver valleyview_gtt_driver = {
        .gen = 7,
        .setup = i9xx_setup,
        .cleanup = gen6_cleanup,
        .write_entry = valleyview_write_entry,
        .dma_mask_size = 40,
        .check_flags = gen6_check_flags,
};
  909.  
/* Table describing the Intel GMCH chipsets supported by this driver.  Each
 * entry maps a graphics-device PCI id to a human-readable chipset name and
 * the intel_gtt_driver that handles it; find_gmch() selects the first entry
 * whose gmch_chip_id matches a device present on the PCI bus.
 */
  914. static const struct intel_gtt_driver_description {
  915.     unsigned int gmch_chip_id;
  916.     char *name;
  917.     const struct intel_gtt_driver *gtt_driver;
  918. } intel_gtt_chipsets[] = {
  919.         { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
  920.                 &i915_gtt_driver },
  921.         { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
  922.                 &i915_gtt_driver },
  923.         { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
  924.                 &i915_gtt_driver },
  925.         { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
  926.                 &i915_gtt_driver },
  927.         { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
  928.                 &i915_gtt_driver },
  929.         { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
  930.                 &i915_gtt_driver },
  931.         { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
  932.                 &i965_gtt_driver },
  933.         { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
  934.                 &i965_gtt_driver },
  935.         { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
  936.                 &i965_gtt_driver },
  937.         { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
  938.                 &i965_gtt_driver },
  939.         { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
  940.                 &i965_gtt_driver },
  941.         { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
  942.                 &i965_gtt_driver },
  943.         { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
  944.                 &g33_gtt_driver },
  945.         { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
  946.                 &g33_gtt_driver },
  947.         { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
  948.                 &g33_gtt_driver },
  949.         { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
  950.                 &pineview_gtt_driver },
  951.         { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
  952.                 &pineview_gtt_driver },
  953.         { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
  954.                 &g4x_gtt_driver },
  955.         { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
  956.                 &g4x_gtt_driver },
  957.         { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
  958.                 &g4x_gtt_driver },
  959.         { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
  960.                 &g4x_gtt_driver },
  961.         { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
  962.                 &g4x_gtt_driver },
  963.         { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
  964.                 &g4x_gtt_driver },
  965.         { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
  966.                 &g4x_gtt_driver },
  967.         { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
  968.             "HD Graphics", &ironlake_gtt_driver },
  969.         { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
  970.             "HD Graphics", &ironlake_gtt_driver },
  971.     { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
  972.         "Sandybridge", &sandybridge_gtt_driver },
  973.     { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
  974.         "Sandybridge", &sandybridge_gtt_driver },
  975.     { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
  976.         "Sandybridge", &sandybridge_gtt_driver },
  977.     { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
  978.         "Sandybridge", &sandybridge_gtt_driver },
  979.     { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
  980.         "Sandybridge", &sandybridge_gtt_driver },
  981.     { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
  982.         "Sandybridge", &sandybridge_gtt_driver },
  983.     { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
  984.         "Sandybridge", &sandybridge_gtt_driver },
  985.         { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
  986.             "Ivybridge", &sandybridge_gtt_driver },
  987.         { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
  988.             "Ivybridge", &sandybridge_gtt_driver },
  989.         { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
  990.             "Ivybridge", &sandybridge_gtt_driver },
  991.         { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
  992.             "Ivybridge", &sandybridge_gtt_driver },
  993.         { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
  994.             "Ivybridge", &sandybridge_gtt_driver },
  995.         { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
  996.             "Ivybridge", &sandybridge_gtt_driver },
  997.         { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
  998.             "ValleyView", &valleyview_gtt_driver },
  999.         { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
  1000.             "Haswell", &haswell_gtt_driver },
  1001.         { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
  1002.             "Haswell", &haswell_gtt_driver },
  1003.         { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
  1004.             "Haswell", &haswell_gtt_driver },
  1005.         { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
  1006.             "Haswell", &haswell_gtt_driver },
  1007.         { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
  1008.             "Haswell", &haswell_gtt_driver },
  1009.         { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
  1010.             "Haswell", &haswell_gtt_driver },
  1011.         { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
  1012.             "Haswell", &haswell_gtt_driver },
  1013.         { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
  1014.             "Haswell", &haswell_gtt_driver },
  1015.         { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
  1016.             "Haswell", &haswell_gtt_driver },
  1017.         { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
  1018.             "Haswell", &haswell_gtt_driver },
  1019.         { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
  1020.             "Haswell", &haswell_gtt_driver },
  1021.         { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
  1022.             "Haswell", &haswell_gtt_driver },
  1023.         { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
  1024.             "Haswell", &haswell_gtt_driver },
  1025.         { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
  1026.             "Haswell", &haswell_gtt_driver },
  1027.         { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
  1028.             "Haswell", &haswell_gtt_driver },
  1029.         { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
  1030.             "Haswell", &haswell_gtt_driver },
  1031.         { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
  1032.             "Haswell", &haswell_gtt_driver },
  1033.         { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
  1034.             "Haswell", &haswell_gtt_driver },
  1035.         { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
  1036.             "Haswell", &haswell_gtt_driver },
  1037.         { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
  1038.             "Haswell", &haswell_gtt_driver },
  1039.         { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
  1040.             "Haswell", &haswell_gtt_driver },
  1041.         { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
  1042.             "Haswell", &haswell_gtt_driver },
  1043.         { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
  1044.             "Haswell", &haswell_gtt_driver },
  1045.         { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
  1046.             "Haswell", &haswell_gtt_driver },
  1047.         { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
  1048.             "Haswell", &haswell_gtt_driver },
  1049.         { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
  1050.             "Haswell", &haswell_gtt_driver },
  1051.         { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
  1052.             "Haswell", &haswell_gtt_driver },
  1053.         { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
  1054.             "Haswell", &haswell_gtt_driver },
  1055.         { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
  1056.             "Haswell", &haswell_gtt_driver },
  1057.         { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
  1058.             "Haswell", &haswell_gtt_driver },
  1059.         { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
  1060.             "Haswell", &haswell_gtt_driver },
  1061.         { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
  1062.             "Haswell", &haswell_gtt_driver },
  1063.         { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
  1064.             "Haswell", &haswell_gtt_driver },
  1065.         { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
  1066.             "Haswell", &haswell_gtt_driver },
  1067.         { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
  1068.             "Haswell", &haswell_gtt_driver },
  1069.         { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
  1070.             "Haswell", &haswell_gtt_driver },
  1071.     { 0, NULL, NULL }
  1072. };
  1073.  
  1074. static int find_gmch(u16 device)
  1075. {
  1076.     struct pci_dev *gmch_device;
  1077.  
  1078.     gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
  1079.     if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
  1080.         gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
  1081.                          device, gmch_device);
  1082.     }
  1083.  
  1084.     if (!gmch_device)
  1085.         return 0;
  1086.  
  1087.     intel_private.pcidev = gmch_device;
  1088.     return 1;
  1089. }
  1090.  
  1091. int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
  1092.                       struct agp_bridge_data *bridge)
  1093. {
  1094.     int i, mask;
  1095.     intel_private.driver = NULL;
  1096.  
  1097.     for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
  1098.         if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
  1099.             intel_private.driver =
  1100.                 intel_gtt_chipsets[i].gtt_driver;
  1101.             break;
  1102.         }
  1103.     }
  1104.  
  1105.     if (!intel_private.driver)
  1106.         return 0;
  1107.  
  1108.         if (bridge) {
  1109.                 bridge->dev_private_data = &intel_private;
  1110.                 bridge->dev = bridge_pdev;
  1111.         }
  1112.  
  1113.     intel_private.bridge_dev = bridge_pdev;
  1114.  
  1115.     dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);
  1116.  
  1117.     mask = intel_private.driver->dma_mask_size;
  1118. //    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
  1119. //        dev_err(&intel_private.pcidev->dev,
  1120. //            "set gfx device dma mask %d-bit failed!\n", mask);
  1121. //    else
  1122. //        pci_set_consistent_dma_mask(intel_private.pcidev,
  1123. //                        DMA_BIT_MASK(mask));
  1124.  
  1125.         if (intel_gtt_init() != 0) {
  1126. //              intel_gmch_remove();
  1127.  
  1128.         return 0;
  1129.         }
  1130.  
  1131.     return 1;
  1132. }
  1133. EXPORT_SYMBOL(intel_gmch_probe);
  1134.  
  1135. const struct intel_gtt *intel_gtt_get(void)
  1136. {
  1137.     return &intel_private.base;
  1138. }
  1139. EXPORT_SYMBOL(intel_gtt_get);
  1140.  
  1141. void intel_gtt_chipset_flush(void)
  1142. {
  1143.         if (intel_private.driver->chipset_flush)
  1144.                 intel_private.driver->chipset_flush();
  1145. }
  1146. EXPORT_SYMBOL(intel_gtt_chipset_flush);
  1147.  
  1148.  
  1149. //phys_addr_t get_bus_addr(void)
  1150. //{
  1151. //    return intel_private.gma_bus_addr;
  1152. //};
  1153.