/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */

#include <syscall.h>

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include <linux/spinlock.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>


struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);


#define PCI_VENDOR_ID_INTEL             0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2


#define AGP_NORMAL_MEMORY 0

#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)


/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
        unsigned int gen : 8;
        unsigned int is_g33 : 1;
        unsigned int is_pineview : 1;
        unsigned int is_ironlake : 1;
        unsigned int has_pgtbl_enable : 1;
        unsigned int dma_mask_size : 8;
        /* Chipset specific GTT setup */
        int (*setup)(void);
        /* This should undo anything done in ->setup() save the unmapping
         * of the mmio register file, that's done in the generic code. */
        void (*cleanup)(void);
        void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
        /* Flags is a more or less chipset specific opaque value.
         * For chipsets that need to support old ums (non-gem) code, this
         * needs to be identical to the various supported agp memory types! */
        bool (*check_flags)(unsigned int flags);
        void (*chipset_flush)(void);
};

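/*
 * All GTT state lives in this single, module-wide instance; the code
 * below only ever drives one GMCH at a time.
 */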
static struct _intel_private {
        const struct intel_gtt_driver *driver;
        struct pci_dev *pcidev; /* device one */
        struct pci_dev *bridge_dev;
        u8 __iomem *registers;
        phys_addr_t gtt_phys_addr;
        u32 PGETBL_save;
        u32 __iomem *gtt; /* I915G */
        bool clear_fake_agp; /* on first access via agp, fill with scratch */
        int num_dcache_entries;
        void __iomem *i9xx_flush_page;
        char *i81x_gtt_table;
        struct resource ifp_resource;
        int resource_valid;
        struct page *scratch_page;
        phys_addr_t scratch_page_dma;
        int refcount;
        /* Whether i915 needs to use the dmar apis or not. */
        unsigned int needs_dmar : 1;
        phys_addr_t gma_bus_addr;
        /* Size of memory reserved for graphics by the BIOS */
        unsigned int stolen_size;
        /* Total number of gtt entries. */
        unsigned int gtt_total_entries;
        /* Part of the gtt that is mappable by the cpu, for those chips where
         * this is not the full gtt. */
        unsigned int gtt_mappable_entries;
} intel_private;

#define INTEL_GTT_GEN   intel_private.driver->gen
#define IS_G33          intel_private.driver->is_g33
#define IS_PINEVIEW     intel_private.driver->is_pineview
#define IS_IRONLAKE     intel_private.driver->is_ironlake
#define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable

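/*
 * Allocate the scratch page that unbound GTT entries are pointed at
 * (see intel_gtt_clear_range() below), so stray device accesses hit a
 * harmless, known page instead of random memory.
 */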
static int intel_gtt_setup_scratch_page(void)
{
        struct page *page;

        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;

        intel_private.scratch_page_dma = page_to_phys(page);
        intel_private.scratch_page = page;

        return 0;
}

static unsigned int intel_gtt_stolen_size(void)
{
        u16 gmch_ctrl;
        u8 rdct;
        int local = 0;
        static const int ddt[4] = { 0, 16, 32, 64 };
        unsigned int stolen_size = 0;

        if (INTEL_GTT_GEN == 1)
                return 0; /* no stolen mem on i81x */

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctrl);

        if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
            intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
                switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
                case I830_GMCH_GMS_STOLEN_512:
                        stolen_size = KB(512);
                        break;
                case I830_GMCH_GMS_STOLEN_1024:
                        stolen_size = MB(1);
                        break;
                case I830_GMCH_GMS_STOLEN_8192:
                        stolen_size = MB(8);
                        break;
                case I830_GMCH_GMS_LOCAL:
                        rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
                        stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                                MB(ddt[I830_RDRAM_DDT(rdct)]);
                        local = 1;
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        } else {
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
                        stolen_size = MB(1);
                        break;
                case I855_GMCH_GMS_STOLEN_4M:
                        stolen_size = MB(4);
                        break;
                case I855_GMCH_GMS_STOLEN_8M:
                        stolen_size = MB(8);
                        break;
                case I855_GMCH_GMS_STOLEN_16M:
                        stolen_size = MB(16);
                        break;
                case I855_GMCH_GMS_STOLEN_32M:
                        stolen_size = MB(32);
                        break;
                case I915_GMCH_GMS_STOLEN_48M:
                        stolen_size = MB(48);
                        break;
                case I915_GMCH_GMS_STOLEN_64M:
                        stolen_size = MB(64);
                        break;
                case G33_GMCH_GMS_STOLEN_128M:
                        stolen_size = MB(128);
                        break;
                case G33_GMCH_GMS_STOLEN_256M:
                        stolen_size = MB(256);
                        break;
                case INTEL_GMCH_GMS_STOLEN_96M:
                        stolen_size = MB(96);
                        break;
                case INTEL_GMCH_GMS_STOLEN_160M:
                        stolen_size = MB(160);
                        break;
                case INTEL_GMCH_GMS_STOLEN_224M:
                        stolen_size = MB(224);
                        break;
                case INTEL_GMCH_GMS_STOLEN_352M:
                        stolen_size = MB(352);
                        break;
                default:
                        stolen_size = 0;
                        break;
                }
        }

        if (stolen_size > 0) {
                dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
                         stolen_size / KB(1), local ? "local" : "stolen");
        } else {
                dev_info(&intel_private.bridge_dev->dev,
                         "no pre-allocated video memory detected\n");
                stolen_size = 0;
        }

        return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
        u32 pgetbl_ctl, pgetbl_ctl2;

        /* ensure that ppgtt is disabled */
        pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
        pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
        writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

        /* write the new ggtt size */
        pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
        pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
        pgetbl_ctl |= size_flag;
        writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}

static unsigned int i965_gtt_total_entries(void)
{
        int size;
        u32 pgetbl_ctl;
        u16 gmch_ctl;

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctl);

        if (INTEL_GTT_GEN == 5) {
                switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
                case G4x_GMCH_SIZE_1M:
                case G4x_GMCH_SIZE_VT_1M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
                        break;
                case G4x_GMCH_SIZE_VT_1_5M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
                        break;
                case G4x_GMCH_SIZE_2M:
                case G4x_GMCH_SIZE_VT_2M:
                        i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
                        break;
                }
        }

        pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

        switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
        case I965_PGETBL_SIZE_128KB:
                size = KB(128);
                break;
        case I965_PGETBL_SIZE_256KB:
                size = KB(256);
                break;
        case I965_PGETBL_SIZE_512KB:
                size = KB(512);
                break;
        /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
        case I965_PGETBL_SIZE_1MB:
                size = KB(1024);
                break;
        case I965_PGETBL_SIZE_2MB:
                size = KB(2048);
                break;
        case I965_PGETBL_SIZE_1_5MB:
                size = KB(1024 + 512);
                break;
        default:
                dev_info(&intel_private.pcidev->dev,
                         "unknown page table size, assuming 512KB\n");
                size = KB(512);
        }

        /* Each GTT entry is 4 bytes, so convert table size to entry count. */
        return size / 4;
}

static unsigned int intel_gtt_total_entries(void)
{
        if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
                return i965_gtt_total_entries();
        } else {
                /* On previous hardware, the GTT size was just what was
                 * required to map the aperture.
                 */
                return intel_private.gtt_mappable_entries;
        }
}

static unsigned int intel_gtt_mappable_entries(void)
{
        unsigned int aperture_size;

        if (INTEL_GTT_GEN == 1) {
                u32 smram_miscc;

                pci_read_config_dword(intel_private.bridge_dev,
                                      I810_SMRAM_MISCC, &smram_miscc);

                if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                                == I810_GFX_MEM_WIN_32M)
                        aperture_size = MB(32);
                else
                        aperture_size = MB(64);
        } else if (INTEL_GTT_GEN == 2) {
                u16 gmch_ctrl;

                pci_read_config_word(intel_private.bridge_dev,
                                     I830_GMCH_CTRL, &gmch_ctrl);

                if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
                        aperture_size = MB(64);
                else
                        aperture_size = MB(128);
        } else {
                /* 9xx supports large sizes, just look at the length */
                aperture_size = pci_resource_len(intel_private.pcidev, 2);
        }

        return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
//      FreePage(intel_private.scratch_page_dma);
}

static void intel_gtt_cleanup(void)
{
        intel_private.driver->cleanup();

        iounmap(intel_private.gtt);
        iounmap(intel_private.registers);

        intel_gtt_teardown_scratch_page();
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
        const unsigned short gpu_devid = intel_private.pcidev->device;

        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
        if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
             gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
             intel_iommu_gfx_mapped)
                return 1;
#endif
        return 0;
}

static bool intel_gtt_can_wc(void)
{
        if (INTEL_GTT_GEN <= 2)
                return false;

        if (INTEL_GTT_GEN >= 6)
                return false;

        /* Reports of major corruption with ILK vt'd enabled */
        if (needs_ilk_vtd_wa())
                return false;

        return true;
}

static int intel_gtt_init(void)
{
        u32 gtt_map_size;
        int ret, bar;

        ret = intel_private.driver->setup();
        if (ret != 0)
                return ret;

        intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
        intel_private.gtt_total_entries = intel_gtt_total_entries();

        /* save the PGETBL reg for resume */
        intel_private.PGETBL_save =
                readl(intel_private.registers+I810_PGETBL_CTL)
                        & ~I810_PGETBL_ENABLED;
        /* we only ever restore the register when enabling the PGTBL... */
        if (HAS_PGTBL_EN)
                intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

        dev_info(&intel_private.bridge_dev->dev,
                 "detected gtt size: %dK total, %dK mappable\n",
                 intel_private.gtt_total_entries * 4,
                 intel_private.gtt_mappable_entries * 4);

        gtt_map_size = intel_private.gtt_total_entries * 4;

        intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
                                    gtt_map_size);
        if (intel_private.gtt == NULL) {
                intel_private.driver->cleanup();
                iounmap(intel_private.registers);
                return -ENOMEM;
        }

#if IS_ENABLED(CONFIG_AGP_INTEL)
        global_cache_flush();   /* FIXME: ? */
#endif

        intel_private.stolen_size = intel_gtt_stolen_size();

        intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

        ret = intel_gtt_setup_scratch_page();
        if (ret != 0) {
                intel_gtt_cleanup();
                return ret;
        }

        if (INTEL_GTT_GEN <= 2)
                bar = I810_GMADR_BAR;
        else
                bar = I915_GMADR_BAR;

        intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
        return 0;
}

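/*
 * Gen2 PTE format: the page-aligned bus address OR'd with flag bits in
 * the low bits. Bit 0 (I810_PTE_VALID) marks the entry present;
 * AGP_USER_CACHED_MEMORY additionally requests snooped/system-cached
 * access.
 */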
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
                             unsigned int flags)
{
        u32 pte_flags = I810_PTE_VALID;

        if (flags == AGP_USER_CACHED_MEMORY)
                pte_flags |= I830_PTE_SYSTEM_CACHED;

        writel(addr | pte_flags, intel_private.gtt + entry);
}

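/*
 * (Re)enable the GTT. Exported for the GPU driver, which needs this on
 * the resume path as well as at init time; that is why the saved
 * PGETBL_CTL value is written back rather than re-read from hardware.
 */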
bool intel_enable_gtt(void)
{
        u8 __iomem *reg;

        if (INTEL_GTT_GEN == 2) {
                u16 gmch_ctrl;

                pci_read_config_word(intel_private.bridge_dev,
                                     I830_GMCH_CTRL, &gmch_ctrl);
                gmch_ctrl |= I830_GMCH_ENABLED;
                pci_write_config_word(intel_private.bridge_dev,
                                      I830_GMCH_CTRL, gmch_ctrl);

                pci_read_config_word(intel_private.bridge_dev,
                                     I830_GMCH_CTRL, &gmch_ctrl);
                if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
                        dev_err(&intel_private.pcidev->dev,
                                "failed to enable the GTT: GMCH_CTRL=%x\n",
                                gmch_ctrl);
                        return false;
                }
        }

        /* On the resume path we may be adjusting the PGTBL value, so
         * be paranoid and flush all chipset write buffers...
         */
        if (INTEL_GTT_GEN >= 3)
                writel(0, intel_private.registers+GFX_FLSH_CNTL);

        reg = intel_private.registers+I810_PGETBL_CTL;
        writel(intel_private.PGETBL_save, reg);
        if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
                dev_err(&intel_private.pcidev->dev,
                        "failed to enable the GTT: PGETBL=%x [expected %x]\n",
                        readl(reg), intel_private.PGETBL_save);
                return false;
        }

        if (INTEL_GTT_GEN >= 3)
                writel(0, intel_private.registers+GFX_FLSH_CNTL);

        return true;
}

static bool i830_check_flags(unsigned int flags)
{
        switch (flags) {
        case 0:
        case AGP_PHYS_MEMORY:
        case AGP_USER_CACHED_MEMORY:
        case AGP_USER_MEMORY:
                return true;
        }

        return false;
}

void intel_gtt_insert_sg_entries(struct sg_table *st,
                                 unsigned int pg_start,
                                 unsigned int flags)
{
        struct scatterlist *sg;
        unsigned int len, m;
        int i, j;

        j = pg_start;

        /* sg may merge pages, but we have to separate
         * per-page addr for GTT */
        for_each_sg(st->sgl, sg, st->nents, i) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (m = 0; m < len; m++) {
                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        intel_private.driver->write_entry(addr, j, flags);
                        j++;
                }
        }
        /* Posting read: flush the PTE writes out to the chipset. */
        readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
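
/*
 * Usage sketch (hypothetical caller; first_pg and num_pages are
 * illustrative names, not part of this API), assuming an already
 * dma-mapped sg_table st:
 *
 *     intel_gtt_insert_sg_entries(&st, first_pg, AGP_USER_CACHED_MEMORY);
 *     ...use the aperture...
 *     intel_gtt_clear_range(first_pg, num_pages);
 */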

#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
                                   unsigned int num_entries,
                                   struct page **pages,
                                   unsigned int flags)
{
        int i, j;

        for (i = 0, j = first_entry; i < num_entries; i++, j++) {
                dma_addr_t addr = page_to_phys(pages[i]);
                intel_private.driver->write_entry(addr, j, flags);
        }
        /* Posting read: flush the PTE writes out to the chipset. */
        readl(intel_private.gtt+j-1);
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
                                         off_t pg_start, int type)
{
        int ret = -EINVAL;

        if (intel_private.clear_fake_agp) {
                int start = intel_private.stolen_size / PAGE_SIZE;
                int end = intel_private.gtt_mappable_entries;
                intel_gtt_clear_range(start, end - start);
                intel_private.clear_fake_agp = false;
        }

        if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
                return i810_insert_dcache_entries(mem, pg_start, type);

        if (mem->page_count == 0)
                goto out;

        if (pg_start + mem->page_count > intel_private.gtt_total_entries)
                goto out_err;

        if (type != mem->type)
                goto out_err;

        if (!intel_private.driver->check_flags(type))
                goto out_err;

        if (!mem->is_flushed)
                global_cache_flush();

        if (intel_private.needs_dmar) {
                struct sg_table st;

                ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
                if (ret != 0)
                        return ret;

                intel_gtt_insert_sg_entries(&st, pg_start, type);
                mem->sg_list = st.sgl;
                mem->num_sg = st.nents;
        } else
                intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
                                       type);

out:
        ret = 0;
out_err:
        mem->is_flushed = true;
        return ret;
}
#endif

void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
        unsigned int i;

        for (i = first_entry; i < (first_entry + num_entries); i++) {
                intel_private.driver->write_entry(intel_private.scratch_page_dma,
                                                  i, 0);
        }
        /* Posting read: flush the PTE writes out to the chipset. */
        readl(intel_private.gtt+i-1);
}

static void intel_i915_setup_chipset_flush(void)
{
        int ret;
        u32 temp;

        pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
        if (!(temp & 0x1)) {
//              intel_alloc_chipset_flush_resource();
//              intel_private.resource_valid = 1;
//              pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                temp &= ~1;

                intel_private.resource_valid = 1;
                intel_private.ifp_resource.start = temp;
                intel_private.ifp_resource.end = temp + PAGE_SIZE;
//              ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
                /* some BIOSes reserve this area in a pnp some don't */
//              if (ret)
//                      intel_private.resource_valid = 0;
        }
}

static void intel_i965_g33_setup_chipset_flush(void)
{
        u32 temp_hi, temp_lo;
        int ret;

        pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
        pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

        if (!(temp_lo & 0x1)) {
//              intel_alloc_chipset_flush_resource();
//              intel_private.resource_valid = 1;
//              pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
//                      upper_32_bits(intel_private.ifp_resource.start));
//              pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                u64 l64;

                temp_lo &= ~0x1;
                l64 = ((u64)temp_hi << 32) | temp_lo;

                intel_private.resource_valid = 1;
                intel_private.ifp_resource.start = l64;
                intel_private.ifp_resource.end = l64 + PAGE_SIZE;
//              ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
                /* some BIOSes reserve this area in a pnp some don't */
//              if (ret)
//                      intel_private.resource_valid = 0;
        }
}

static void intel_i9xx_setup_flush(void)
{
        /* return if already configured */
        if (intel_private.ifp_resource.start)
                return;

        if (INTEL_GTT_GEN == 6)
                return;

        /* setup a resource for this object */
        intel_private.ifp_resource.name = "Intel Flush Page";
        intel_private.ifp_resource.flags = IORESOURCE_MEM;

        /* Setup chipset flush for 915 */
        if (IS_G33 || INTEL_GTT_GEN >= 4)
                intel_i965_g33_setup_chipset_flush();
        else
                intel_i915_setup_chipset_flush();

        if (intel_private.ifp_resource.start)
                intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
        if (!intel_private.i9xx_flush_page)
                dev_err(&intel_private.pcidev->dev,
                        "can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
        if (intel_private.i9xx_flush_page)
                iounmap(intel_private.i9xx_flush_page);
//      if (intel_private.resource_valid)
//              release_resource(&intel_private.ifp_resource);
        intel_private.ifp_resource.start = 0;
        intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
        if (intel_private.i9xx_flush_page)
                writel(1, intel_private.i9xx_flush_page);
}

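/*
 * Gen4+ PTEs are still 32 bits wide, so i965_write_entry() folds physical
 * address bits 35:32 into PTE bits 7:4 ("shift high bits down" below).
 * Worked example (illustrative numbers): addr = 0x3_4000_0000 gives
 * (addr >> 28) & 0xf0 = 0x30, so the entry written is 0x4000_0031 once
 * I810_PTE_VALID (bit 0) is set.
 */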
static void i965_write_entry(dma_addr_t addr,
                             unsigned int entry,
                             unsigned int flags)
{
        u32 pte_flags;

        pte_flags = I810_PTE_VALID;
        if (flags == AGP_USER_CACHED_MEMORY)
                pte_flags |= I830_PTE_SYSTEM_CACHED;

        /* Shift high bits down */
        addr |= (addr >> 28) & 0xf0;
        writel(addr | pte_flags, intel_private.gtt + entry);
}

static int i9xx_setup(void)
{
        phys_addr_t reg_addr;
        int size = KB(512);

        reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);

        intel_private.registers = ioremap(reg_addr, size);
        if (!intel_private.registers)
                return -ENOMEM;

        switch (INTEL_GTT_GEN) {
        case 3:
                intel_private.gtt_phys_addr =
                        pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
                break;
        case 5:
                intel_private.gtt_phys_addr = reg_addr + MB(2);
                break;
        default:
                intel_private.gtt_phys_addr = reg_addr + KB(512);
                break;
        }

        intel_i9xx_setup_flush();

        return 0;
}

static const struct intel_gtt_driver i915_gtt_driver = {
        .gen = 3,
        .has_pgtbl_enable = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        /* i945 is the last gpu to need phys mem (for overlay and cursors). */
        .write_entry = i830_write_entry,
        .dma_mask_size = 32,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};

static const struct intel_gtt_driver g33_gtt_driver = {
        .gen = 3,
        .is_g33 = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};

static const struct intel_gtt_driver pineview_gtt_driver = {
        .gen = 3,
        .is_pineview = 1, .is_g33 = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};

static const struct intel_gtt_driver i965_gtt_driver = {
        .gen = 4,
        .has_pgtbl_enable = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};

static const struct intel_gtt_driver g4x_gtt_driver = {
        .gen = 5,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};

static const struct intel_gtt_driver ironlake_gtt_driver = {
        .gen = 5,
        .is_ironlake = 1,
        .setup = i9xx_setup,
        .cleanup = i9xx_cleanup,
        .write_entry = i965_write_entry,
        .dma_mask_size = 36,
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
};

/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
        unsigned int gmch_chip_id;
        char *name;
        const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
        { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
                &i915_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
                &i965_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
                &g33_gtt_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
                &pineview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
                &pineview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
                &g4x_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
                "HD Graphics", &ironlake_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
                "HD Graphics", &ironlake_gtt_driver },
        { 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
        struct pci_dev *gmch_device;

        gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
        if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
                /* We want function 0; keep searching from the first match. */
                gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
                                             device, gmch_device);
        }

        if (!gmch_device)
                return 0;

        intel_private.pcidev = gmch_device;
        return 1;
}

int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
                     struct agp_bridge_data *bridge)
{
        int i, mask;

        /*
         * Can be called from the fake agp driver but also directly from
         * drm/i915.ko. Hence we need to check whether everything is set up
         * already.
         */
        if (intel_private.driver) {
                intel_private.refcount++;
                return 1;
        }

        for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
                if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
                        intel_private.driver =
                                intel_gtt_chipsets[i].gtt_driver;
                        break;
                }
        }

        if (!intel_private.driver)
                return 0;

        intel_private.refcount++;

#if IS_ENABLED(CONFIG_AGP_INTEL)
        if (bridge) {
                bridge->driver = &intel_fake_agp_driver;
                bridge->dev_private_data = &intel_private;
                bridge->dev = bridge_pdev;
        }
#endif

        intel_private.bridge_dev = bridge_pdev;

        dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

        mask = intel_private.driver->dma_mask_size;
//      if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//              dev_err(&intel_private.pcidev->dev,
//                      "set gfx device dma mask %d-bit failed!\n", mask);
//      else
//              pci_set_consistent_dma_mask(intel_private.pcidev,
//                                          DMA_BIT_MASK(mask));

        if (intel_gtt_init() != 0) {
//              intel_gmch_remove();
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
                   phys_addr_t *mappable_base, unsigned long *mappable_end)
{
        *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
        *stolen_size = intel_private.stolen_size;
        *mappable_base = intel_private.gma_bus_addr;
        *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
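
/*
 * Consumer flow, sketched after the way drm/i915 uses this interface;
 * bridge_pdev and gpu_pdev stand in for the caller's PCI devices:
 *
 *     size_t gtt_total, stolen;
 *     phys_addr_t mappable_base;
 *     unsigned long mappable_end;
 *
 *     if (!intel_gmch_probe(bridge_pdev, gpu_pdev, NULL))
 *             return -ENODEV;
 *     intel_gtt_get(&gtt_total, &stolen, &mappable_base, &mappable_end);
 */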

void intel_gtt_chipset_flush(void)
{
        if (intel_private.driver->chipset_flush)
                intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);


MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");