/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
#include <linux/spinlock.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>

#include <syscall.h>

struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);


#define PCI_VENDOR_ID_INTEL             0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2


#define AGP_NORMAL_MEMORY 0

#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)



/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
    unsigned int gen : 8;
    unsigned int is_g33 : 1;
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1;
    unsigned int dma_mask_size : 8;
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    void (*chipset_flush)(void);
};

static struct _intel_private {
    struct intel_gtt base;
    const struct intel_gtt_driver *driver;
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;
    u8 __iomem *registers;
    phys_addr_t gtt_bus_addr;
    u32 PGETBL_save;
    u32 __iomem *gtt;       /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;
    char *i81x_gtt_table;
    struct resource ifp_resource;
    int resource_valid;
    struct page *scratch_page;
    int refcount;
} intel_private;

#define INTEL_GTT_GEN   intel_private.driver->gen
#define IS_G33          intel_private.driver->is_g33
#define IS_PINEVIEW     intel_private.driver->is_pineview
#define IS_IRONLAKE     intel_private.driver->is_ironlake
#define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable

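/*
 * The scratch page backs every GTT entry that has nothing useful bound
 * to it (see intel_gtt_clear_range() below), so stray GPU accesses hit
 * a harmless zeroed page instead of random memory.
 */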
static int intel_gtt_setup_scratch_page(void)
{
    struct page *page;
    dma_addr_t dma_addr;

    page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
    if (page == NULL)
        return -ENOMEM;
    intel_private.base.scratch_page_dma = page_to_phys(page);

    intel_private.scratch_page = page;

    return 0;
}

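/*
 * "Stolen" memory is main memory the BIOS has set aside for the
 * integrated graphics device; only its size is recoverable here, by
 * decoding the GMCH control word for each chipset family.
 */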
static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;
    static const int ddt[4] = { 0, 16, 32, 64 };
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctrl);

    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else {
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
        dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
             stolen_size / KB(1), local ? "local" : "stolen");
    } else {
        dev_info(&intel_private.bridge_dev->dev,
             "no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
    u32 pgetbl_ctl, pgetbl_ctl2;

    /* ensure that ppgtt is disabled */
    pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
    pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
    writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

    /* write the new ggtt size */
    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
    pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
    pgetbl_ctl |= size_flag;
    writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}

static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
        dev_info(&intel_private.pcidev->dev,
             "unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    return size / 4;
}

static unsigned int intel_gtt_total_entries(void)
{
    if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
        return i965_gtt_total_entries();
    else {
        /* On previous hardware, the GTT size was just what was
         * required to map the aperture.
         */
        return intel_private.base.gtt_mappable_entries;
    }
}

static unsigned int intel_gtt_mappable_entries(void)
{
    unsigned int aperture_size;

    if (INTEL_GTT_GEN == 1) {
        u32 smram_miscc;

        pci_read_config_dword(intel_private.bridge_dev,
                      I810_SMRAM_MISCC, &smram_miscc);

        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                == I810_GFX_MEM_WIN_32M)
            aperture_size = MB(32);
        else
            aperture_size = MB(64);
    } else if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);

        if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
            aperture_size = MB(64);
        else
            aperture_size = MB(128);
    } else {
        /* 9xx supports large sizes, just look at the length */
        aperture_size = pci_resource_len(intel_private.pcidev, 2);
    }

    return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}

static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

    iounmap(intel_private.gtt);
    iounmap(intel_private.registers);

    intel_gtt_teardown_scratch_page();
}

static int intel_gtt_init(void)
{
    u32 gma_addr;
    u32 gtt_map_size;
    int ret;

    ret = intel_private.driver->setup();
    if (ret != 0)
        return ret;

    intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
    intel_private.base.gtt_total_entries = intel_gtt_total_entries();

    /* save the PGETBL reg for resume */
    intel_private.PGETBL_save =
        readl(intel_private.registers+I810_PGETBL_CTL)
            & ~I810_PGETBL_ENABLED;
    /* we only ever restore the register when enabling the PGTBL... */
    if (HAS_PGTBL_EN)
        intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

    dev_info(&intel_private.bridge_dev->dev,
         "detected gtt size: %dK total, %dK mappable\n",
         intel_private.base.gtt_total_entries * 4,
         intel_private.base.gtt_mappable_entries * 4);

    gtt_map_size = intel_private.base.gtt_total_entries * 4;

    intel_private.gtt = ioremap(intel_private.gtt_bus_addr, gtt_map_size);
    if (intel_private.gtt == NULL) {
        intel_private.driver->cleanup();
        iounmap(intel_private.registers);
        return -ENOMEM;
    }
    intel_private.base.gtt = intel_private.gtt;

    asm volatile("wbinvd");

    intel_private.base.stolen_size = intel_gtt_stolen_size();

    intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

    ret = intel_gtt_setup_scratch_page();
    if (ret != 0) {
        intel_gtt_cleanup();
        return ret;
    }

    if (INTEL_GTT_GEN <= 2)
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
                      &gma_addr);
    else
        pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                      &gma_addr);

    intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

    return 0;
}

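/* Gen2 PTE layout (as used below): the page-aligned bus address sits in
 * the high bits, I810_PTE_VALID marks the entry present, and
 * I830_PTE_SYSTEM_CACHED selects snooped (cacheable) access for
 * AGP_USER_CACHED_MEMORY. */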
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
                 unsigned int flags)
{
    u32 pte_flags = I810_PTE_VALID;

    if (flags == AGP_USER_CACHED_MEMORY)
        pte_flags |= I830_PTE_SYSTEM_CACHED;

    writel(addr | pte_flags, intel_private.gtt + entry);
}

bool intel_enable_gtt(void)
{
    u8 __iomem *reg;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                      I830_GMCH_CTRL, gmch_ctrl);

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
            dev_err(&intel_private.pcidev->dev,
                "failed to enable the GTT: GMCH_CTRL=%x\n",
                gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    reg = intel_private.registers+I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
        dev_err(&intel_private.pcidev->dev,
            "failed to enable the GTT: PGETBL=%x [expected %x]\n",
            readl(reg), intel_private.PGETBL_save);
        return false;
    }

    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    return true;
}

static bool i830_check_flags(unsigned int flags)
{
    switch (flags) {
    case 0:
    case AGP_PHYS_MEMORY:
    case AGP_USER_CACHED_MEMORY:
    case AGP_USER_MEMORY:
        return true;
    }

    return false;
}

void intel_gtt_insert_sg_entries(struct sg_table *st,
                 unsigned int pg_start,
                 unsigned int flags)
{
    struct scatterlist *sg;
    unsigned int len, m;
    int i, j;

    j = pg_start;

    /* sg may merge pages, but we have to separate
     * per-page addr for GTT */
    for_each_sg(st->sgl, sg, st->nents, i) {
        len = sg_dma_len(sg) >> PAGE_SHIFT;
        for (m = 0; m < len; m++) {
            dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
            intel_private.driver->write_entry(addr, j, flags);
            j++;
        }
    }
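    /* Posting read: make sure the PTE writes have reached the GTT
     * before anyone relies on the new mappings. */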
    readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);

static void intel_gtt_insert_pages(unsigned int first_entry,
                   unsigned int num_entries,
                   struct page **pages,
                   unsigned int flags)
{
    int i, j;

    for (i = 0, j = first_entry; i < num_entries; i++, j++) {
        dma_addr_t addr = page_to_phys(pages[i]);
        intel_private.driver->write_entry(addr,
                          j, flags);
    }
    readl(intel_private.gtt+j-1);
}


void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
    unsigned int i;

    for (i = first_entry; i < (first_entry + num_entries); i++) {
        intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
                          i, 0);
    }
    readl(intel_private.gtt+i-1);
}
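
/*
 * Illustrative sketch (not part of this driver): a caller holding a
 * dma-mapped sg_table for a 16-page buffer would typically bind it at a
 * chosen GTT offset and later unbind it like this ("st" and "offset"
 * being hypothetical names):
 *
 *     intel_gtt_insert_sg_entries(st, offset, AGP_USER_CACHED_MEMORY);
 *     ...
 *     intel_gtt_clear_range(offset, 16);
 */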

static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    if (INTEL_GTT_GEN == 6)
        return;

    /* setup a resource for this object */
//    intel_private.ifp_resource.name = "Intel Flush Page";
//    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    intel_private.resource_valid = 0;

    /* Setup chipset flush for 915 */
//    if (IS_G33 || INTEL_GTT_GEN >= 4) {
//        intel_i965_g33_setup_chipset_flush();
//    } else {
//        intel_i915_setup_chipset_flush();
//    }

//    if (intel_private.ifp_resource.start)
//        intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
            "can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
    if (intel_private.i9xx_flush_page)
        iounmap(intel_private.i9xx_flush_page);
//    if (intel_private.resource_valid)
//        release_resource(&intel_private.ifp_resource);
    intel_private.ifp_resource.start = 0;
    intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
    if (intel_private.i9xx_flush_page)
        writel(1, intel_private.i9xx_flush_page);
}

static void i965_write_entry(dma_addr_t addr,
                 unsigned int entry,
                 unsigned int flags)
{
    u32 pte_flags;

    pte_flags = I810_PTE_VALID;
    if (flags == AGP_USER_CACHED_MEMORY)
        pte_flags |= I830_PTE_SYSTEM_CACHED;

    /* Shift high bits down: gen4+ PTEs can address 36 bits, with
     * physical address bits 35:32 stored in PTE bits 7:4. */
    addr |= (addr >> 28) & 0xf0;
    writel(addr | pte_flags, intel_private.gtt + entry);
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
    const unsigned short gpu_devid = intel_private.pcidev->device;

    /* Query intel_iommu to see if we need the workaround. Presumably that
     * was loaded first.
     */
    if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
         gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
         intel_iommu_gfx_mapped)
        return 1;
#endif
    return 0;
}

static int i9xx_setup(void)
{
    u32 reg_addr, gtt_addr;
    int size = KB(512);

    pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

    reg_addr &= 0xfff80000;

    intel_private.registers = ioremap(reg_addr, size);
    if (!intel_private.registers)
        return -ENOMEM;

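    /* Where the GTT itself lives relative to the MMIO window differs by
     * generation: gen3 reports it in a separate config register, while
     * gen4/gen5 place it at a fixed offset inside the register BAR. */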
    switch (INTEL_GTT_GEN) {
    case 3:
        pci_read_config_dword(intel_private.pcidev,
                      I915_PTEADDR, &gtt_addr);
        intel_private.gtt_bus_addr = gtt_addr;
        break;
    case 5:
        intel_private.gtt_bus_addr = reg_addr + MB(2);
        break;
    default:
        intel_private.gtt_bus_addr = reg_addr + KB(512);
        break;
    }

    if (needs_idle_maps())
        intel_private.base.do_idle_maps = 1;

    intel_i9xx_setup_flush();

    return 0;
}

static const struct intel_gtt_driver i915_gtt_driver = {
    .gen = 3,
    .has_pgtbl_enable = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    /* i945 is the last gpu to need phys mem (for overlay and cursors). */
    .write_entry = i830_write_entry,
    .dma_mask_size = 32,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
    .gen = 3,
    .is_g33 = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
    .gen = 3,
    .is_pineview = 1, .is_g33 = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
    .gen = 4,
    .has_pgtbl_enable = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
    .gen = 5,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
    .gen = 5,
    .is_ironlake = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};

/* Table to describe Intel GMCH and AGP/PCIE GART drivers. Each entry
 * pairs a GMCH chip id with the gtt driver that handles it; find_gmch()
 * matches the installed hardware against this list.
 */
static const struct intel_gtt_driver_description {
    unsigned int gmch_chip_id;
    char *name;
    const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
    { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
        &g33_gtt_driver },
    { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
        &g33_gtt_driver },
    { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
        &g33_gtt_driver },
    { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
        &pineview_gtt_driver },
    { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
        &pineview_gtt_driver },
    { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
        "HD Graphics", &ironlake_gtt_driver },
    { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
        "HD Graphics", &ironlake_gtt_driver },
    { 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
    struct pci_dev *gmch_device;

    gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
    if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
        gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
                         device, gmch_device);
    }

    if (!gmch_device)
        return 0;

    intel_private.pcidev = gmch_device;
    return 1;
}

int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
             struct agp_bridge_data *bridge)
{
    int i, mask;
    intel_private.driver = NULL;

    for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
        if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
            intel_private.driver =
                intel_gtt_chipsets[i].gtt_driver;
            break;
        }
    }

    if (!intel_private.driver)
        return 0;

    if (bridge) {
        bridge->dev_private_data = &intel_private;
        bridge->dev = bridge_pdev;
    }

    intel_private.bridge_dev = bridge_pdev;

    dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

    mask = intel_private.driver->dma_mask_size;
//    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//        dev_err(&intel_private.pcidev->dev,
//            "set gfx device dma mask %d-bit failed!\n", mask);
//    else
//        pci_set_consistent_dma_mask(intel_private.pcidev,
//                        DMA_BIT_MASK(mask));

    if (intel_gtt_init() != 0) {
//        intel_gmch_remove();
        return 0;
    }

    return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

struct intel_gtt *intel_gtt_get(void)
{
    return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);
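
/*
 * Illustrative sketch (not part of this driver): a gpu driver port that
 * has already located the bridge and gpu pci functions might bring the
 * GTT up roughly like this, where "bridge_pdev" and "gpu_pdev" are
 * hypothetical variables:
 *
 *     if (intel_gmch_probe(bridge_pdev, gpu_pdev, NULL)) {
 *             struct intel_gtt *gtt = intel_gtt_get();
 *             // gtt->gtt_total_entries, gtt->stolen_size etc. now valid
 *     }
 */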

void intel_gtt_chipset_flush(void)
{
    if (intel_private.driver->chipset_flush)
        intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);


MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");
  853.