/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphics devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
 *
 * /fairy-tale-mode off
 */

#include <syscall.h>

#include <linux/module.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
#include <linux/spinlock.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>


struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);


#define PCI_VENDOR_ID_INTEL             0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2


#define AGP_NORMAL_MEMORY 0

#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)


/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
    unsigned int gen : 8;
    unsigned int is_g33 : 1;
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1;
    unsigned int dma_mask_size : 8;
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    void (*chipset_flush)(void);
};
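
/* Each supported chipset provides one instance of this vtable; the real
 * tables (i915_gtt_driver and friends) follow further down. As a purely
 * illustrative, hypothetical instance:
 *
 *     static const struct intel_gtt_driver example_gtt_driver = {
 *         .gen           = 3,
 *         .setup         = i9xx_setup,
 *         .cleanup       = i9xx_cleanup,
 *         .write_entry   = i830_write_entry,
 *         .dma_mask_size = 32,
 *         .check_flags   = i830_check_flags,
 *         .chipset_flush = i9xx_chipset_flush,
 *     };
 */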

static struct _intel_private {
    const struct intel_gtt_driver *driver;
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;
    u8 __iomem *registers;
    phys_addr_t gtt_bus_addr;
    u32 PGETBL_save;
    u32 __iomem *gtt;       /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;
    char *i81x_gtt_table;
    struct resource ifp_resource;
    int resource_valid;
    struct page *scratch_page;
    phys_addr_t scratch_page_dma;
    int refcount;
    /* Whether i915 needs to use the dmar apis or not. */
    unsigned int needs_dmar : 1;
    phys_addr_t gma_bus_addr;
    /* Size of memory reserved for graphics by the BIOS */
    unsigned int stolen_size;
    /* Total number of gtt entries. */
    unsigned int gtt_total_entries;
    /* Part of the gtt that is mappable by the cpu, for those chips where
     * this is not the full gtt. */
    unsigned int gtt_mappable_entries;
} intel_private;

#define INTEL_GTT_GEN   intel_private.driver->gen
#define IS_G33          intel_private.driver->is_g33
#define IS_PINEVIEW     intel_private.driver->is_pineview
#define IS_IRONLAKE     intel_private.driver->is_ironlake
#define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable

static int intel_gtt_setup_scratch_page(void)
{
    struct page *page;

    page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
    if (page == NULL)
        return -ENOMEM;

    intel_private.scratch_page_dma = page_to_phys(page);
    intel_private.scratch_page = page;

    return 0;
}
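
/* The scratch page backs every GTT entry that has nothing bound to it:
 * intel_gtt_clear_range() below points unbound entries at
 * intel_private.scratch_page_dma, so stray GPU accesses land in a single
 * harmless zeroed page instead of random memory. */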

static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;
    static const int ddt[4] = { 0, 16, 32, 64 };
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctrl);

    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else {
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
        dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
             stolen_size / KB(1), local ? "local" : "stolen");
    } else {
        dev_info(&intel_private.bridge_dev->dev,
             "no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
    u32 pgetbl_ctl, pgetbl_ctl2;

    /* ensure that ppgtt is disabled */
    pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
    pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
    writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

    /* write the new ggtt size */
    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
    pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
    pgetbl_ctl |= size_flag;
    writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}

static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
        dev_info(&intel_private.pcidev->dev,
             "unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    return size/4;
}
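
/* Each GTT entry (PTE) is four bytes on these chipsets, so dividing the
 * table size in bytes by 4 above yields the number of entries; the same
 * factor shows up as "gtt_total_entries * 4" in intel_gtt_init() below. */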

static unsigned int intel_gtt_total_entries(void)
{
    if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
        return i965_gtt_total_entries();
    else {
        /* On previous hardware, the GTT size was just what was
         * required to map the aperture.
         */
        return intel_private.gtt_mappable_entries;
    }
}

static unsigned int intel_gtt_mappable_entries(void)
{
    unsigned int aperture_size;

    if (INTEL_GTT_GEN == 1) {
        u32 smram_miscc;

        pci_read_config_dword(intel_private.bridge_dev,
                      I810_SMRAM_MISCC, &smram_miscc);

        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                == I810_GFX_MEM_WIN_32M)
            aperture_size = MB(32);
        else
            aperture_size = MB(64);
    } else if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);

        if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
            aperture_size = MB(64);
        else
            aperture_size = MB(128);
    } else {
        /* 9xx supports large sizes, just look at the length */
        aperture_size = pci_resource_len(intel_private.pcidev, 2);
    }

    return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
    // FreePage(intel_private.scratch_page_dma);
}

static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

    iounmap(intel_private.gtt);
    iounmap(intel_private.registers);

    intel_gtt_teardown_scratch_page();
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
    const unsigned short gpu_devid = intel_private.pcidev->device;

    /* Query intel_iommu to see if we need the workaround. Presumably that
     * was loaded first.
     */
    if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
         gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
         intel_iommu_gfx_mapped)
        return 1;
#endif
    return 0;
}

static bool intel_gtt_can_wc(void)
{
    if (INTEL_GTT_GEN <= 2)
        return false;

    if (INTEL_GTT_GEN >= 6)
        return false;

    /* Reports of major corruption with ILK vt'd enabled */
    if (needs_ilk_vtd_wa())
        return false;

    return true;
}
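
/* Note: upstream uses intel_gtt_can_wc() to try a write-combining
 * (ioremap_wc) mapping of the GTT first, falling back to uncached. This
 * port simply maps the GTT with plain ioremap() in intel_gtt_init() below,
 * so the helper is currently informational only. */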

static int intel_gtt_init(void)
{
    u32 gma_addr;
    u32 gtt_map_size;
    int ret;

    ret = intel_private.driver->setup();
    if (ret != 0)
        return ret;

    intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
    intel_private.gtt_total_entries = intel_gtt_total_entries();

    /* save the PGETBL reg for resume */
    intel_private.PGETBL_save =
        readl(intel_private.registers+I810_PGETBL_CTL)
            & ~I810_PGETBL_ENABLED;
    /* we only ever restore the register when enabling the PGTBL... */
    if (HAS_PGTBL_EN)
        intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

    dev_info(&intel_private.bridge_dev->dev,
         "detected gtt size: %dK total, %dK mappable\n",
         intel_private.gtt_total_entries * 4,
         intel_private.gtt_mappable_entries * 4);

    gtt_map_size = intel_private.gtt_total_entries * 4;

    intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
                    gtt_map_size);
    if (intel_private.gtt == NULL) {
        intel_private.driver->cleanup();
        iounmap(intel_private.registers);
        return -ENOMEM;
    }

    asm volatile("wbinvd");

    intel_private.stolen_size = intel_gtt_stolen_size();

    intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

    ret = intel_gtt_setup_scratch_page();
    if (ret != 0) {
        intel_gtt_cleanup();
        return ret;
    }

    if (INTEL_GTT_GEN <= 2)
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
                      &gma_addr);
    else
        pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                      &gma_addr);

    intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

    return 0;
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
                 unsigned int flags)
{
    u32 pte_flags = I810_PTE_VALID;

    if (flags == AGP_USER_CACHED_MEMORY)
        pte_flags |= I830_PTE_SYSTEM_CACHED;

    writel(addr | pte_flags, intel_private.gtt + entry);
}
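
/* A PTE on these parts is just the page's physical address with status
 * bits in the low nibble. Assuming I810_PTE_VALID is bit 0 (as defined in
 * intel-agp.h), binding the page at 0x12345000 into entry 42 writes
 * 0x12345001 to intel_private.gtt[42]. */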

bool intel_enable_gtt(void)
{
    u8 __iomem *reg;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                      I830_GMCH_CTRL, gmch_ctrl);

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
            dev_err(&intel_private.pcidev->dev,
                "failed to enable the GTT: GMCH_CTRL=%x\n",
                gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    reg = intel_private.registers+I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
        dev_err(&intel_private.pcidev->dev,
            "failed to enable the GTT: PGETBL=%x [expected %x]\n",
            readl(reg), intel_private.PGETBL_save);
        return false;
    }

    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    return true;
}

static bool i830_check_flags(unsigned int flags)
{
    switch (flags) {
    case 0:
    case AGP_PHYS_MEMORY:
    case AGP_USER_CACHED_MEMORY:
    case AGP_USER_MEMORY:
        return true;
    }

    return false;
}

void intel_gtt_insert_sg_entries(struct sg_table *st,
                 unsigned int pg_start,
                 unsigned int flags)
{
    struct scatterlist *sg;
    unsigned int len, m;
    int i, j;

    j = pg_start;

    /* sg may merge pages, but we have to separate
     * per-page addr for GTT */
    for_each_sg(st->sgl, sg, st->nents, i) {
        len = sg_dma_len(sg) >> PAGE_SHIFT;
        for (m = 0; m < len; m++) {
            dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
            intel_private.driver->write_entry(addr, j, flags);
            j++;
        }
    }
    readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
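
/* The trailing readl() is a posting read: it forces the preceding PTE
 * writes out of the chipset write buffer before the caller touches the
 * aperture. A caller would bind an object roughly like this hypothetical
 * sketch (assumes the Linux scatterlist helpers are available; dma
 * mapping and error handling omitted):
 *
 *     struct sg_table st;
 *     if (sg_alloc_table_from_pages(&st, pages, n, 0,
 *                                   n << PAGE_SHIFT, GFP_KERNEL) == 0)
 *         intel_gtt_insert_sg_entries(&st, first_entry,
 *                                     AGP_USER_CACHED_MEMORY);
 */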

static void intel_gtt_insert_pages(unsigned int first_entry,
                   unsigned int num_entries,
                   struct page **pages,
                   unsigned int flags)
{
    int i, j;

    for (i = 0, j = first_entry; i < num_entries; i++, j++) {
        dma_addr_t addr = page_to_phys(pages[i]);
        intel_private.driver->write_entry(addr, j, flags);
    }
    readl(intel_private.gtt+j-1);
}


void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
    unsigned int i;

    for (i = first_entry; i < (first_entry + num_entries); i++) {
        intel_private.driver->write_entry(intel_private.scratch_page_dma,
                          i, 0);
    }
    readl(intel_private.gtt+i-1);
}

static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    if (INTEL_GTT_GEN == 6)
        return;

    /* setup a resource for this object */
//    intel_private.ifp_resource.name = "Intel Flush Page";
//    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    intel_private.resource_valid = 0;

    /* Setup chipset flush for 915 */
//    if (IS_G33 || INTEL_GTT_GEN >= 4) {
//        intel_i965_g33_setup_chipset_flush();
//    } else {
//        intel_i915_setup_chipset_flush();
//    }

//    if (intel_private.ifp_resource.start)
//        intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
            "can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
    if (intel_private.i9xx_flush_page)
        iounmap(intel_private.i9xx_flush_page);
//    if (intel_private.resource_valid)
//        release_resource(&intel_private.ifp_resource);
    intel_private.ifp_resource.start = 0;
    intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
    if (intel_private.i9xx_flush_page)
        writel(1, intel_private.i9xx_flush_page);
}

static void i965_write_entry(dma_addr_t addr,
                 unsigned int entry,
                 unsigned int flags)
{
    u32 pte_flags;

    pte_flags = I810_PTE_VALID;
    if (flags == AGP_USER_CACHED_MEMORY)
        pte_flags |= I830_PTE_SYSTEM_CACHED;

    /* Shift high bits down */
    addr |= (addr >> 28) & 0xf0;
    writel(addr | pte_flags, intel_private.gtt + entry);
}
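
/* On gen4+ the PTE is still 32 bits wide, but these chips address up to
 * 36 bits of memory (hence dma_mask_size = 36 below): address bits 35:32
 * are stored in PTE bits 7:4. (addr >> 28) & 0xf0 moves exactly those
 * four bits into place before the low address bits and flags are or'ed
 * in above. */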

static int i9xx_setup(void)
{
    u32 reg_addr, gtt_addr;
    int size = KB(512);

    pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

    reg_addr &= 0xfff80000;

    intel_private.registers = ioremap(reg_addr, size);
    if (!intel_private.registers)
        return -ENOMEM;

    switch (INTEL_GTT_GEN) {
    case 3:
        pci_read_config_dword(intel_private.pcidev,
                      I915_PTEADDR, &gtt_addr);
        intel_private.gtt_bus_addr = gtt_addr;
        break;
    case 5:
        intel_private.gtt_bus_addr = reg_addr + MB(2);
        break;
    default:
        intel_private.gtt_bus_addr = reg_addr + KB(512);
        break;
    }

    intel_i9xx_setup_flush();

    return 0;
}
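
/* In other words: gen3 parts publish the GTT base in a PCI config register
 * of their own (I915_PTEADDR), while on gen4 the GTT sits 512KB into the
 * MMIO BAR and on Ironlake (gen5) 2MB in, so only the register BAR address
 * read above is needed there. */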

static const struct intel_gtt_driver i915_gtt_driver = {
    .gen = 3,
    .has_pgtbl_enable = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    /* i945 is the last gpu to need phys mem (for overlay and cursors). */
    .write_entry = i830_write_entry,
    .dma_mask_size = 32,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
    .gen = 3,
    .is_g33 = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
    .gen = 3,
    .is_pineview = 1, .is_g33 = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
    .gen = 4,
    .has_pgtbl_enable = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
    .gen = 5,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
    .gen = 5,
    .is_ironlake = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};

/* Table describing the supported Intel GMCH chipsets. find_gmch() looks the
 * gmch_chip_id up on the PCI bus to locate the graphics device and picks the
 * matching gtt_driver.
 */
static const struct intel_gtt_driver_description {
    unsigned int gmch_chip_id;
    char *name;
    const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
    { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
        &g33_gtt_driver },
    { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
        &g33_gtt_driver },
    { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
        &g33_gtt_driver },
    { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
        &pineview_gtt_driver },
    { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
        &pineview_gtt_driver },
    { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
        "HD Graphics", &ironlake_gtt_driver },
    { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
        "HD Graphics", &ironlake_gtt_driver },
    { 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
    struct pci_dev *gmch_device;

    gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
    if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
        gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
                         device, gmch_device);
    }

    if (!gmch_device)
        return 0;

    intel_private.pcidev = gmch_device;
    return 1;
}
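
/* The retry above handles devices that expose the same id on more than one
 * PCI function: if the first hit is not function 0, the search continues
 * from it, presumably so the lookup settles on the actual graphics function
 * rather than a sibling function carrying the same device id. */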

int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
             struct agp_bridge_data *bridge)
{
    int i, mask;

    /*
     * Can be called from the fake agp driver but also directly from
     * drm/i915.ko. Hence we need to check whether everything is set up
     * already.
     */
    if (intel_private.driver) {
        intel_private.refcount++;
        return 1;
    }

    for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
        if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
            intel_private.driver =
                intel_gtt_chipsets[i].gtt_driver;
            break;
        }
    }

    if (!intel_private.driver)
        return 0;

    intel_private.refcount++;

    if (bridge) {
        bridge->dev_private_data = &intel_private;
        bridge->dev = bridge_pdev;
    }

    intel_private.bridge_dev = bridge_pdev;

    dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

    mask = intel_private.driver->dma_mask_size;
//    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//        dev_err(&intel_private.pcidev->dev,
//            "set gfx device dma mask %d-bit failed!\n", mask);
//    else
//        pci_set_consistent_dma_mask(intel_private.pcidev,
//                        DMA_BIT_MASK(mask));

    if (intel_gtt_init() != 0) {
//        intel_gmch_remove();
        return 0;
    }

    return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
           phys_addr_t *mappable_base, unsigned long *mappable_end)
{
    *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
    *stolen_size = intel_private.stolen_size;
    *mappable_base = intel_private.gma_bus_addr;
    *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
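
/* Entry counts are converted to sizes here: each entry maps one page, so
 * entries << PAGE_SHIFT gives bytes. A consumer (e.g. drm/i915) would do
 * something like this hypothetical sketch:
 *
 *     size_t gtt_total, stolen;
 *     phys_addr_t mappable_base;
 *     unsigned long mappable_end;
 *
 *     intel_gtt_get(&gtt_total, &stolen, &mappable_base, &mappable_end);
 */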

void intel_gtt_chipset_flush(void)
{
    if (intel_private.driver->chipset_flush)
        intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);


MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");