
/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphics devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore; it just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
 *
 * /fairy-tale-mode off
 */

#include <syscall.h>

#include <linux/module.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
#include <linux/spinlock.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>


struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);


#define PCI_VENDOR_ID_INTEL             0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2


#define AGP_NORMAL_MEMORY 0

#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)


/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
        unsigned int gen : 8;
        unsigned int is_g33 : 1;
        unsigned int is_pineview : 1;
        unsigned int is_ironlake : 1;
        unsigned int has_pgtbl_enable : 1;
        unsigned int dma_mask_size : 8;
        /* Chipset specific GTT setup */
        int (*setup)(void);
        /* This should undo anything done in ->setup() save the unmapping
         * of the mmio register file, that's done in the generic code. */
        void (*cleanup)(void);
        void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
        /* Flags is a more or less chipset specific opaque value.
         * For chipsets that need to support old ums (non-gem) code, this
         * needs to be identical to the various supported agp memory types! */
        bool (*check_flags)(unsigned int flags);
        void (*chipset_flush)(void);
};

static struct _intel_private {
        const struct intel_gtt_driver *driver;
        struct pci_dev *pcidev; /* device one */
        struct pci_dev *bridge_dev;
        u8 __iomem *registers;
        phys_addr_t gtt_bus_addr;
        u32 PGETBL_save;
        u32 __iomem *gtt;       /* I915G */
        bool clear_fake_agp; /* on first access via agp, fill with scratch */
        int num_dcache_entries;
        void __iomem *i9xx_flush_page;
        char *i81x_gtt_table;
        struct resource ifp_resource;
        int resource_valid;
        struct page *scratch_page;
        phys_addr_t scratch_page_dma;
        int refcount;
        /* Whether i915 needs to use the dmar apis or not. */
        unsigned int needs_dmar : 1;
        phys_addr_t gma_bus_addr;
        /* Size of memory reserved for graphics by the BIOS */
        unsigned int stolen_size;
        /* Total number of gtt entries. */
        unsigned int gtt_total_entries;
        /* Part of the gtt that is mappable by the cpu, for those chips where
         * this is not the full gtt. */
        unsigned int gtt_mappable_entries;
} intel_private;

#define INTEL_GTT_GEN   intel_private.driver->gen
#define IS_G33          intel_private.driver->is_g33
#define IS_PINEVIEW     intel_private.driver->is_pineview
#define IS_IRONLAKE     intel_private.driver->is_ironlake
#define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable

/* Allocate a zeroed page that unbound GTT entries can safely point at
 * (see intel_gtt_clear_range). */
static int intel_gtt_setup_scratch_page(void)
{
        struct page *page;

        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;

        intel_private.scratch_page_dma = page_to_phys(page);
        intel_private.scratch_page = page;

        return 0;
}

static unsigned int intel_gtt_stolen_size(void)
{
        u16 gmch_ctrl;
        u8 rdct;
        int local = 0;
        static const int ddt[4] = { 0, 16, 32, 64 };
        unsigned int stolen_size = 0;

        if (INTEL_GTT_GEN == 1)
                return 0; /* no stolen mem on i81x */

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctrl);

        if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
            intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
                switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
                case I830_GMCH_GMS_STOLEN_512:
                        stolen_size = KB(512);
                        break;
                case I830_GMCH_GMS_STOLEN_1024:
                        stolen_size = MB(1);
                        break;
                case I830_GMCH_GMS_STOLEN_8192:
                        stolen_size = MB(8);
                        break;
                case I830_GMCH_GMS_LOCAL:
                        rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
                        stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                                      MB(ddt[I830_RDRAM_DDT(rdct)]);
                        local = 1;
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        } else {
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
                        stolen_size = MB(1);
                        break;
                case I855_GMCH_GMS_STOLEN_4M:
                        stolen_size = MB(4);
                        break;
                case I855_GMCH_GMS_STOLEN_8M:
                        stolen_size = MB(8);
                        break;
                case I855_GMCH_GMS_STOLEN_16M:
                        stolen_size = MB(16);
                        break;
                case I855_GMCH_GMS_STOLEN_32M:
                        stolen_size = MB(32);
                        break;
                case I915_GMCH_GMS_STOLEN_48M:
                        stolen_size = MB(48);
                        break;
                case I915_GMCH_GMS_STOLEN_64M:
                        stolen_size = MB(64);
                        break;
                case G33_GMCH_GMS_STOLEN_128M:
                        stolen_size = MB(128);
                        break;
                case G33_GMCH_GMS_STOLEN_256M:
                        stolen_size = MB(256);
                        break;
                case INTEL_GMCH_GMS_STOLEN_96M:
                        stolen_size = MB(96);
                        break;
                case INTEL_GMCH_GMS_STOLEN_160M:
                        stolen_size = MB(160);
                        break;
                case INTEL_GMCH_GMS_STOLEN_224M:
                        stolen_size = MB(224);
                        break;
                case INTEL_GMCH_GMS_STOLEN_352M:
                        stolen_size = MB(352);
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        }

        if (stolen_size > 0) {
                dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
                         stolen_size / KB(1), local ? "local" : "stolen");
        } else {
                dev_info(&intel_private.bridge_dev->dev,
                         "no pre-allocated video memory detected\n");
                stolen_size = 0;
        }

        return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
        u32 pgetbl_ctl, pgetbl_ctl2;

        /* ensure that ppgtt is disabled */
        pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
        pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
        writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

        /* write the new ggtt size */
        pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
        pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
        pgetbl_ctl |= size_flag;
        writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}

static unsigned int i965_gtt_total_entries(void)
{
        int size;
        u32 pgetbl_ctl;
        u16 gmch_ctl;

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctl);

        if (INTEL_GTT_GEN == 5) {
                switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
                case G4x_GMCH_SIZE_1M:
                case G4x_GMCH_SIZE_VT_1M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
                        break;
                case G4x_GMCH_SIZE_VT_1_5M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
                        break;
                case G4x_GMCH_SIZE_2M:
                case G4x_GMCH_SIZE_VT_2M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
                        break;
                }
        }

        pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

        switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
        case I965_PGETBL_SIZE_128KB:
                size = KB(128);
                break;
        case I965_PGETBL_SIZE_256KB:
                size = KB(256);
                break;
        case I965_PGETBL_SIZE_512KB:
                size = KB(512);
                break;
        /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
        case I965_PGETBL_SIZE_1MB:
                size = KB(1024);
                break;
        case I965_PGETBL_SIZE_2MB:
                size = KB(2048);
                break;
        case I965_PGETBL_SIZE_1_5MB:
                size = KB(1024 + 512);
                break;
        default:
                dev_info(&intel_private.pcidev->dev,
                         "unknown page table size, assuming 512KB\n");
                size = KB(512);
        }

        return size/4;
}
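
/*
 * Note on the size/4 above: each GTT entry is 4 bytes and maps one 4 KiB
 * page, so the function returns the entry count, not bytes. Worked example:
 * a 512 KB page table holds 512*1024/4 = 131072 entries, i.e. 512 MB of
 * GTT address space.
 */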

static unsigned int intel_gtt_total_entries(void)
{
        if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
                return i965_gtt_total_entries();
        else {
                /* On previous hardware, the GTT size was just what was
                 * required to map the aperture.
                 */
                return intel_private.gtt_mappable_entries;
        }
}

static unsigned int intel_gtt_mappable_entries(void)
{
        unsigned int aperture_size;

        if (INTEL_GTT_GEN == 1) {
                u32 smram_miscc;

                pci_read_config_dword(intel_private.bridge_dev,
                                      I810_SMRAM_MISCC, &smram_miscc);

                if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                                == I810_GFX_MEM_WIN_32M)
                        aperture_size = MB(32);
                else
                        aperture_size = MB(64);
        } else if (INTEL_GTT_GEN == 2) {
                u16 gmch_ctrl;

                pci_read_config_word(intel_private.bridge_dev,
                                     I830_GMCH_CTRL, &gmch_ctrl);

                if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
                        aperture_size = MB(64);
                else
                        aperture_size = MB(128);
        } else {
                /* 9xx supports large sizes, just look at the length */
                aperture_size = pci_resource_len(intel_private.pcidev, 2);
        }

        return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
        // FreePage(intel_private.scratch_page_dma);
}

static void intel_gtt_cleanup(void)
{
        intel_private.driver->cleanup();

        iounmap(intel_private.gtt);
        iounmap(intel_private.registers);

        intel_gtt_teardown_scratch_page();
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
        const unsigned short gpu_devid = intel_private.pcidev->device;

        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
        if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
             gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
             intel_iommu_gfx_mapped)
                return 1;
#endif
        return 0;
}

static bool intel_gtt_can_wc(void)
{
        if (INTEL_GTT_GEN <= 2)
                return false;

        if (INTEL_GTT_GEN >= 6)
                return false;

        /* Reports of major corruption with ILK vt'd enabled */
        if (needs_ilk_vtd_wa())
                return false;

        return true;
}

static int intel_gtt_init(void)
{
        u32 gma_addr;
        u32 gtt_map_size;
        int ret;

        ret = intel_private.driver->setup();
        if (ret != 0)
                return ret;

        intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
        intel_private.gtt_total_entries = intel_gtt_total_entries();

        /* save the PGETBL reg for resume */
        intel_private.PGETBL_save =
                readl(intel_private.registers+I810_PGETBL_CTL)
                        & ~I810_PGETBL_ENABLED;
        /* we only ever restore the register when enabling the PGTBL... */
        if (HAS_PGTBL_EN)
                intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

        dev_info(&intel_private.bridge_dev->dev,
                 "detected gtt size: %dK total, %dK mappable\n",
                 intel_private.gtt_total_entries * 4,
                 intel_private.gtt_mappable_entries * 4);

        gtt_map_size = intel_private.gtt_total_entries * 4;

        intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
                                    gtt_map_size);
        if (intel_private.gtt == NULL) {
                intel_private.driver->cleanup();
                iounmap(intel_private.registers);
                return -ENOMEM;
        }

        /* flush the cpu caches before the gtt goes live */
        asm volatile("wbinvd":::"memory");

        intel_private.stolen_size = intel_gtt_stolen_size();

        intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

        ret = intel_gtt_setup_scratch_page();
        if (ret != 0) {
                intel_gtt_cleanup();
                return ret;
        }

        if (INTEL_GTT_GEN <= 2)
                pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
                                      &gma_addr);
        else
                pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                                      &gma_addr);

        intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

        return 0;
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
                             unsigned int flags)
{
        u32 pte_flags = I810_PTE_VALID;

        if (flags == AGP_USER_CACHED_MEMORY)
                pte_flags |= I830_PTE_SYSTEM_CACHED;

        writel(addr | pte_flags, intel_private.gtt + entry);
}

bool intel_enable_gtt(void)
{
        u8 __iomem *reg;

        if (INTEL_GTT_GEN == 2) {
                u16 gmch_ctrl;

                pci_read_config_word(intel_private.bridge_dev,
                                     I830_GMCH_CTRL, &gmch_ctrl);
                gmch_ctrl |= I830_GMCH_ENABLED;
                pci_write_config_word(intel_private.bridge_dev,
                                      I830_GMCH_CTRL, gmch_ctrl);

                pci_read_config_word(intel_private.bridge_dev,
                                     I830_GMCH_CTRL, &gmch_ctrl);
                if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
                        dev_err(&intel_private.pcidev->dev,
                                "failed to enable the GTT: GMCH_CTRL=%x\n",
                                gmch_ctrl);
                        return false;
                }
        }

        /* On the resume path we may be adjusting the PGTBL value, so
         * be paranoid and flush all chipset write buffers...
         */
        if (INTEL_GTT_GEN >= 3)
                writel(0, intel_private.registers+GFX_FLSH_CNTL);

        reg = intel_private.registers+I810_PGETBL_CTL;
        writel(intel_private.PGETBL_save, reg);
        if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
                dev_err(&intel_private.pcidev->dev,
                        "failed to enable the GTT: PGETBL=%x [expected %x]\n",
                        readl(reg), intel_private.PGETBL_save);
                return false;
        }

        if (INTEL_GTT_GEN >= 3)
                writel(0, intel_private.registers+GFX_FLSH_CNTL);

        return true;
}
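
/*
 * Illustrative call site (hypothetical, not part of this file): a caller's
 * resume path would re-enable the GTT and treat a false return as fatal,
 * e.g.:
 *
 *      if (!intel_enable_gtt())
 *              return -EIO;    // PGETBL did not stick, GTT is unusable
 */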

static bool i830_check_flags(unsigned int flags)
{
        switch (flags) {
        case 0:
        case AGP_PHYS_MEMORY:
        case AGP_USER_CACHED_MEMORY:
        case AGP_USER_MEMORY:
                return true;
        }

        return false;
}

void intel_gtt_insert_sg_entries(struct sg_table *st,
                                 unsigned int pg_start,
                                 unsigned int flags)
{
        struct scatterlist *sg;
        unsigned int len, m;
        int i, j;

        j = pg_start;

        /* sg may merge pages, but we have to separate
         * per-page addr for GTT */
        for_each_sg(st->sgl, sg, st->nents, i) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (m = 0; m < len; m++) {
                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        intel_private.driver->write_entry(addr, j, flags);
                        j++;
                }
        }
        /* posting read of the last pte to flush the writes */
        readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
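
/*
 * Usage sketch (hypothetical, for illustration only; the caller's variable
 * names are made up). A caller that has already built and dma-mapped an
 * sg_table binds it into the GTT starting at a page slot:
 *
 *      struct sg_table *st = obj_pages;        // already dma-mapped
 *      unsigned int first = gtt_offset >> PAGE_SHIFT;
 *
 *      intel_gtt_insert_sg_entries(st, first, AGP_USER_MEMORY);
 *
 * Each sg segment may cover several pages; the loop above splits it into
 * one write_entry() call per 4 KiB page.
 */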

static void intel_gtt_insert_pages(unsigned int first_entry,
                                   unsigned int num_entries,
                                   struct page **pages,
                                   unsigned int flags)
{
        int i, j;

        for (i = 0, j = first_entry; i < num_entries; i++, j++) {
                dma_addr_t addr = page_to_phys(pages[i]);
                intel_private.driver->write_entry(addr,
                                                  j, flags);
        }
        readl(intel_private.gtt+j-1);
}


void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
        unsigned int i;

        for (i = first_entry; i < (first_entry + num_entries); i++) {
                intel_private.driver->write_entry(intel_private.scratch_page_dma,
                                                  i, 0);
        }
        readl(intel_private.gtt+i-1);
}

static void intel_i915_setup_chipset_flush(void)
{
        int ret;
        u32 temp;

        pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
        if (!(temp & 0x1)) {
//              intel_alloc_chipset_flush_resource();
//              intel_private.resource_valid = 1;
//              pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                temp &= ~1;

                intel_private.resource_valid = 1;
                intel_private.ifp_resource.start = temp;
                intel_private.ifp_resource.end = temp + PAGE_SIZE;
//              ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
                /* some BIOSes reserve this area in a pnp resource, some don't */
//              if (ret)
//                      intel_private.resource_valid = 0;
        }
}

static void intel_i965_g33_setup_chipset_flush(void)
{
        u32 temp_hi, temp_lo;
        int ret;

        pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
        pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

        if (!(temp_lo & 0x1)) {

//              intel_alloc_chipset_flush_resource();

//              intel_private.resource_valid = 1;
//              pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
//                      upper_32_bits(intel_private.ifp_resource.start));
//              pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                u64 l64;

                temp_lo &= ~0x1;
                l64 = ((u64)temp_hi << 32) | temp_lo;

                intel_private.resource_valid = 1;
                intel_private.ifp_resource.start = l64;
                intel_private.ifp_resource.end = l64 + PAGE_SIZE;
//              ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
                /* some BIOSes reserve this area in a pnp resource, some don't */
//              if (ret)
//                      intel_private.resource_valid = 0;
        }
}

static void intel_i9xx_setup_flush(void)
{
        /* return if already configured */
        if (intel_private.ifp_resource.start)
                return;

        if (INTEL_GTT_GEN == 6)
                return;

        /* setup a resource for this object */
        intel_private.ifp_resource.name = "Intel Flush Page";
        intel_private.ifp_resource.flags = IORESOURCE_MEM;

        /* Setup chipset flush for 915 */
        if (IS_G33 || INTEL_GTT_GEN >= 4) {
                intel_i965_g33_setup_chipset_flush();
        } else {
                intel_i915_setup_chipset_flush();
        }

        if (intel_private.ifp_resource.start)
                intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
        if (!intel_private.i9xx_flush_page)
                dev_err(&intel_private.pcidev->dev,
                        "can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
        if (intel_private.i9xx_flush_page)
                iounmap(intel_private.i9xx_flush_page);
//      if (intel_private.resource_valid)
//              release_resource(&intel_private.ifp_resource);
        intel_private.ifp_resource.start = 0;
        intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
        if (intel_private.i9xx_flush_page)
                writel(1, intel_private.i9xx_flush_page);
}

static void i965_write_entry(dma_addr_t addr,
                             unsigned int entry,
                             unsigned int flags)
{
        u32 pte_flags;

        pte_flags = I810_PTE_VALID;
        if (flags == AGP_USER_CACHED_MEMORY)
                pte_flags |= I830_PTE_SYSTEM_CACHED;

        /* Shift high bits down: address bits 32..35 live in pte bits 4..7 */
        addr |= (addr >> 28) & 0xf0;
        writel(addr | pte_flags, intel_private.gtt + entry);
}
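
/*
 * Worked example of the bit fold above: for a 36-bit physical address
 * addr = 0x1_2345_6000, (addr >> 28) & 0xf0 = 0x10, so address bit 32
 * lands in pte bit 4. With I810_PTE_VALID set, the 32-bit pte written
 * out is 0x2345_6011: bits 31:12 page address, bits 7:4 high address
 * bits, bit 0 valid.
 */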

static int i9xx_setup(void)
{
        u32 reg_addr, gtt_addr;
        int size = KB(512);

        pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

        reg_addr &= 0xfff80000;

        intel_private.registers = ioremap(reg_addr, size);
        if (!intel_private.registers)
                return -ENOMEM;

        switch (INTEL_GTT_GEN) {
        case 3:
                pci_read_config_dword(intel_private.pcidev,
                                      I915_PTEADDR, &gtt_addr);
                intel_private.gtt_bus_addr = gtt_addr;
                break;
        case 5:
                intel_private.gtt_bus_addr = reg_addr + MB(2);
                break;
        default:
                intel_private.gtt_bus_addr = reg_addr + KB(512);
                break;
        }

        intel_i9xx_setup_flush();

        return 0;
}

static const struct intel_gtt_driver i915_gtt_driver = {
        .gen = 3,
        .has_pgtbl_enable = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        /* i945 is the last gpu to need phys mem (for overlay and cursors). */
        .write_entry = i830_write_entry,
        .dma_mask_size = 32,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
        .gen = 3,
        .is_g33 = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
        .gen = 3,
        .is_pineview = 1, .is_g33 = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
        .gen = 4,
        .has_pgtbl_enable = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
        .gen = 5,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
        .gen = 5,
        .is_ironlake = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};

/* Table to describe the Intel GMCH chipsets this code supports. Every
 * entry must have a non-null gtt_driver; find_gmch() picks the entry whose
 * gmch_chip_id matches a device that is actually present.
 */
static const struct intel_gtt_driver_description {
        unsigned int gmch_chip_id;
        char *name;
        const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
        { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
                &pineview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
                &pineview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
                "HD Graphics", &ironlake_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
                "HD Graphics", &ironlake_gtt_driver },
        { 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
        struct pci_dev *gmch_device;

        gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
        if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
                gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
                                             device, gmch_device);
        }

        if (!gmch_device)
                return 0;

        intel_private.pcidev = gmch_device;
        return 1;
}

int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
                     struct agp_bridge_data *bridge)
{
        int i, mask;

        /*
         * Can be called from the fake agp driver but also directly from
         * drm/i915.ko. Hence we need to check whether everything is set up
         * already.
         */
        if (intel_private.driver) {
                intel_private.refcount++;
                return 1;
        }

        for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
                if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
                        intel_private.driver =
                                intel_gtt_chipsets[i].gtt_driver;
                        break;
                }
        }

        if (!intel_private.driver)
                return 0;

        intel_private.refcount++;

        if (bridge) {
                bridge->dev_private_data = &intel_private;
                bridge->dev = bridge_pdev;
        }

        intel_private.bridge_dev = bridge_pdev;

        dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

        mask = intel_private.driver->dma_mask_size;
//      if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//              dev_err(&intel_private.pcidev->dev,
//                      "set gfx device dma mask %d-bit failed!\n", mask);
//      else
//              pci_set_consistent_dma_mask(intel_private.pcidev,
//                                          DMA_BIT_MASK(mask));

        if (intel_gtt_init() != 0) {
//              intel_gmch_remove();
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
                   phys_addr_t *mappable_base, unsigned long *mappable_end)
{
        *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
        *stolen_size = intel_private.stolen_size;
        *mappable_base = intel_private.gma_bus_addr;
        *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
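
/*
 * Usage sketch (hypothetical caller, e.g. a drm driver sizing its GTT
 * address space after intel_gmch_probe() succeeded; the variable names
 * are made up):
 *
 *      size_t gtt_total, stolen;
 *      phys_addr_t mappable_base;
 *      unsigned long mappable_end;
 *
 *      intel_gtt_get(&gtt_total, &stolen, &mappable_base, &mappable_end);
 *      // gtt_total and mappable_end are in bytes (entries << PAGE_SHIFT)
 */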

void intel_gtt_chipset_flush(void)
{
        if (intel_private.driver->chipset_flush)
                intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);


MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");
  944.