/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */

#include <syscall.h>

#include <linux/module.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
#include <linux/spinlock.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>


struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);


#define PCI_VENDOR_ID_INTEL             0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2


#define AGP_NORMAL_MEMORY 0

#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)


/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
    unsigned int gen : 8;
    unsigned int is_g33 : 1;
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1;
    unsigned int dma_mask_size : 8;
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    void (*chipset_flush)(void);
};

static struct _intel_private {
    const struct intel_gtt_driver *driver;
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;
    u8 __iomem *registers;
    phys_addr_t gtt_phys_addr;
    u32 PGETBL_save;
    u32 __iomem *gtt; /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;
    char *i81x_gtt_table;
    struct resource ifp_resource;
    int resource_valid;
    struct page *scratch_page;
    phys_addr_t scratch_page_dma;
    int refcount;
    /* Whether i915 needs to use the dmar apis or not. */
    unsigned int needs_dmar : 1;
    phys_addr_t gma_bus_addr;
    /* Size of memory reserved for graphics by the BIOS */
    unsigned int stolen_size;
    /* Total number of gtt entries. */
    unsigned int gtt_total_entries;
    /* Part of the gtt that is mappable by the cpu, for those chips where
     * this is not the full gtt. */
    unsigned int gtt_mappable_entries;
} intel_private;

#define INTEL_GTT_GEN   intel_private.driver->gen
#define IS_G33          intel_private.driver->is_g33
#define IS_PINEVIEW     intel_private.driver->is_pineview
#define IS_IRONLAKE     intel_private.driver->is_ironlake
#define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable

static int intel_gtt_setup_scratch_page(void)
{
    struct page *page;

    page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
    if (page == NULL)
        return -ENOMEM;

    intel_private.scratch_page_dma = page_to_phys(page);
    intel_private.scratch_page = page;

    return 0;
}
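
/*
 * Note: the scratch page allocated above is what intel_gtt_clear_range()
 * points unbound GTT entries at, so that stray GPU accesses hit a harmless
 * zeroed page instead of random memory.
 */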

static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;
    static const int ddt[4] = { 0, 16, 32, 64 };
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                         I830_GMCH_CTRL, &gmch_ctrl);

    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            rdct = readb(intel_private.registers + I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else {
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
        dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
                 stolen_size / KB(1), local ? "local" : "stolen");
    } else {
        dev_info(&intel_private.bridge_dev->dev,
                 "no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}
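
/*
 * Worked example: on an i915-class bridge with (gmch_ctrl & I855_GMCH_GMS_MASK)
 * == I855_GMCH_GMS_STOLEN_32M, the switch above yields MB(32), and the
 * dev_info prints "detected 32768K stolen memory" (32768 == MB(32) / KB(1)).
 */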

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
    u32 pgetbl_ctl, pgetbl_ctl2;

    /* ensure that ppgtt is disabled */
    pgetbl_ctl2 = readl(intel_private.registers + I965_PGETBL_CTL2);
    pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
    writel(pgetbl_ctl2, intel_private.registers + I965_PGETBL_CTL2);

    /* write the new ggtt size */
    pgetbl_ctl = readl(intel_private.registers + I810_PGETBL_CTL);
    pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
    pgetbl_ctl |= size_flag;
    writel(pgetbl_ctl, intel_private.registers + I810_PGETBL_CTL);
}

static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                         I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers + I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
        dev_info(&intel_private.pcidev->dev,
                 "unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    return size / 4;
}
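
/*
 * The division by 4 above converts the table size in bytes into an entry
 * count: each GTT entry is a 4-byte PTE. A 512KB table therefore holds
 * 128K entries and, at one 4KB page per entry, maps a 512MB aperture.
 */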

static unsigned int intel_gtt_total_entries(void)
{
    if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
        return i965_gtt_total_entries();
    } else {
        /* On previous hardware, the GTT size was just what was
         * required to map the aperture.
         */
        return intel_private.gtt_mappable_entries;
    }
}

static unsigned int intel_gtt_mappable_entries(void)
{
    unsigned int aperture_size;

    if (INTEL_GTT_GEN == 1) {
        u32 smram_miscc;

        pci_read_config_dword(intel_private.bridge_dev,
                              I810_SMRAM_MISCC, &smram_miscc);

        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                == I810_GFX_MEM_WIN_32M)
            aperture_size = MB(32);
        else
            aperture_size = MB(64);
    } else if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctrl);

        if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
            aperture_size = MB(64);
        else
            aperture_size = MB(128);
    } else {
        /* 9xx supports large sizes, just look at the length */
        aperture_size = pci_resource_len(intel_private.pcidev, 2);
    }

    return aperture_size >> PAGE_SHIFT;
}
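
/*
 * The shift above converts the aperture size in bytes into a number of
 * page-sized GTT entries, e.g. a 256MB aperture with PAGE_SHIFT == 12
 * gives 256MB >> 12 = 65536 mappable entries.
 */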

static void intel_gtt_teardown_scratch_page(void)
{
    // FreePage(intel_private.scratch_page_dma);
}

static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

    iounmap(intel_private.gtt);
    iounmap(intel_private.registers);

    intel_gtt_teardown_scratch_page();
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
    const unsigned short gpu_devid = intel_private.pcidev->device;

    /* Query intel_iommu to see if we need the workaround. Presumably that
     * was loaded first.
     */
    if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
         gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
        intel_iommu_gfx_mapped)
        return 1;
#endif
    return 0;
}

static bool intel_gtt_can_wc(void)
{
    if (INTEL_GTT_GEN <= 2)
        return false;

    if (INTEL_GTT_GEN >= 6)
        return false;

    /* Reports of major corruption with ILK vt'd enabled */
    if (needs_ilk_vtd_wa())
        return false;

    return true;
}

static int intel_gtt_init(void)
{
    u32 gtt_map_size;
    int ret, bar;

    ret = intel_private.driver->setup();
    if (ret != 0)
        return ret;

    intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
    intel_private.gtt_total_entries = intel_gtt_total_entries();

    /* save the PGETBL reg for resume */
    intel_private.PGETBL_save =
        readl(intel_private.registers + I810_PGETBL_CTL)
            & ~I810_PGETBL_ENABLED;
    /* we only ever restore the register when enabling the PGTBL... */
    if (HAS_PGTBL_EN)
        intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

    dev_info(&intel_private.bridge_dev->dev,
             "detected gtt size: %dK total, %dK mappable\n",
             intel_private.gtt_total_entries * 4,
             intel_private.gtt_mappable_entries * 4);

    gtt_map_size = intel_private.gtt_total_entries * 4;

    intel_private.gtt = ioremap(intel_private.gtt_phys_addr, gtt_map_size);
    if (intel_private.gtt == NULL) {
        intel_private.driver->cleanup();
        iounmap(intel_private.registers);
        return -ENOMEM;
    }

#if IS_ENABLED(CONFIG_AGP_INTEL)
    global_cache_flush();   /* FIXME: ? */
#endif

    intel_private.stolen_size = intel_gtt_stolen_size();

    intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

    ret = intel_gtt_setup_scratch_page();
    if (ret != 0) {
        intel_gtt_cleanup();
        return ret;
    }

    if (INTEL_GTT_GEN <= 2)
        bar = I810_GMADR_BAR;
    else
        bar = I915_GMADR_BAR;

    intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
    return 0;
}


static void i830_write_entry(dma_addr_t addr, unsigned int entry,
                             unsigned int flags)
{
    u32 pte_flags = I810_PTE_VALID;

    if (flags == AGP_USER_CACHED_MEMORY)
        pte_flags |= I830_PTE_SYSTEM_CACHED;

    writel(addr | pte_flags, intel_private.gtt + entry);
}
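
/*
 * Gen2 PTE layout sketch: the page-aligned physical address occupies the
 * high bits and the flags the low bits, so binding e.g. a cached page at
 * 0x12345000 writes 0x12345000 | I810_PTE_VALID | I830_PTE_SYSTEM_CACHED
 * into the entry's slot in the gtt mapping.
 */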

bool intel_enable_gtt(void)
{
    u8 __iomem *reg;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                              I830_GMCH_CTRL, gmch_ctrl);

        pci_read_config_word(intel_private.bridge_dev,
                             I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
            dev_err(&intel_private.pcidev->dev,
                    "failed to enable the GTT: GMCH_CTRL=%x\n",
                    gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers + GFX_FLSH_CNTL);

    reg = intel_private.registers + I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
        dev_err(&intel_private.pcidev->dev,
                "failed to enable the GTT: PGETBL=%x [expected %x]\n",
                readl(reg), intel_private.PGETBL_save);
        return false;
    }

    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers + GFX_FLSH_CNTL);

    return true;
}

static bool i830_check_flags(unsigned int flags)
{
    switch (flags) {
    case 0:
    case AGP_PHYS_MEMORY:
    case AGP_USER_CACHED_MEMORY:
    case AGP_USER_MEMORY:
        return true;
    }

    return false;
}

void intel_gtt_insert_sg_entries(struct sg_table *st,
                                 unsigned int pg_start,
                                 unsigned int flags)
{
    struct scatterlist *sg;
    unsigned int len, m;
    int i, j;

    j = pg_start;

    /* sg may merge pages, but we have to separate
     * per-page addr for GTT */
    for_each_sg(st->sgl, sg, st->nents, i) {
        len = sg_dma_len(sg) >> PAGE_SHIFT;
        for (m = 0; m < len; m++) {
            dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
            intel_private.driver->write_entry(addr, j, flags);
            j++;
        }
    }
    readl(intel_private.gtt + j - 1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
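
/*
 * Usage sketch (illustrative, not from this file): a gem backend that has
 * dma-mapped an object's pages into an sg_table would bind it with
 *
 *     intel_gtt_insert_sg_entries(&obj_st, obj_offset >> PAGE_SHIFT,
 *                                 AGP_USER_CACHED_MEMORY);
 *
 * where obj_st and obj_offset are hypothetical caller-side names. The
 * trailing readl() above is a posting read that flushes the PTE writes
 * before any GPU access.
 */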

#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
                                   unsigned int num_entries,
                                   struct page **pages,
                                   unsigned int flags)
{
    int i, j;

    for (i = 0, j = first_entry; i < num_entries; i++, j++) {
        dma_addr_t addr = page_to_phys(pages[i]);
        intel_private.driver->write_entry(addr, j, flags);
    }
    readl(intel_private.gtt + j - 1);
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
                                         off_t pg_start, int type)
{
    int ret = -EINVAL;

    if (intel_private.clear_fake_agp) {
        int start = intel_private.stolen_size / PAGE_SIZE;
        int end = intel_private.gtt_mappable_entries;
        intel_gtt_clear_range(start, end - start);
        intel_private.clear_fake_agp = false;
    }

    if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
        return i810_insert_dcache_entries(mem, pg_start, type);

    if (mem->page_count == 0)
        goto out;

    if (pg_start + mem->page_count > intel_private.gtt_total_entries)
        goto out_err;

    if (type != mem->type)
        goto out_err;

    if (!intel_private.driver->check_flags(type))
        goto out_err;

    if (!mem->is_flushed)
        global_cache_flush();

    if (intel_private.needs_dmar) {
        struct sg_table st;

        ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
        if (ret != 0)
            return ret;

        intel_gtt_insert_sg_entries(&st, pg_start, type);
        mem->sg_list = st.sgl;
        mem->num_sg = st.nents;
    } else
        intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
                               type);

out:
    ret = 0;
out_err:
    mem->is_flushed = true;
    return ret;
}
#endif

void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
    unsigned int i;

    for (i = first_entry; i < (first_entry + num_entries); i++) {
        intel_private.driver->write_entry(intel_private.scratch_page_dma,
                                          i, 0);
    }
    readl(intel_private.gtt + i - 1);
}
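
/*
 * Callers use intel_gtt_clear_range() when unbinding: every entry in the
 * range is rewritten to point at the scratch page rather than simply
 * invalidated, which keeps stray GPU accesses harmless. The final readl()
 * again serves as a posting read for the PTE writes.
 */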

static void intel_i915_setup_chipset_flush(void)
{
    int ret;
    u32 temp;

    pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
    if (!(temp & 0x1)) {
//              intel_alloc_chipset_flush_resource();
//              intel_private.resource_valid = 1;
//              pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
    } else {
        temp &= ~1;

        intel_private.resource_valid = 1;
        intel_private.ifp_resource.start = temp;
        intel_private.ifp_resource.end = temp + PAGE_SIZE;
//              ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
        /* some BIOSes reserve this area in a pnp, some don't */
//              if (ret)
//                      intel_private.resource_valid = 0;
    }
}

static void intel_i965_g33_setup_chipset_flush(void)
{
    u32 temp_hi, temp_lo;
    int ret;

    pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
    pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

    if (!(temp_lo & 0x1)) {

//              intel_alloc_chipset_flush_resource();

//              intel_private.resource_valid = 1;
//              pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
//                      upper_32_bits(intel_private.ifp_resource.start));
//              pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
    } else {
        u64 l64;

        temp_lo &= ~0x1;
        l64 = ((u64)temp_hi << 32) | temp_lo;

        intel_private.resource_valid = 1;
        intel_private.ifp_resource.start = l64;
        intel_private.ifp_resource.end = l64 + PAGE_SIZE;
//              ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
        /* some BIOSes reserve this area in a pnp, some don't */
//              if (ret)
//                      intel_private.resource_valid = 0;
    }
}

static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    if (INTEL_GTT_GEN == 6)
        return;

    /* setup a resource for this object */
    intel_private.ifp_resource.name = "Intel Flush Page";
    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    /* Setup chipset flush for 915 */
    if (IS_G33 || INTEL_GTT_GEN >= 4) {
        intel_i965_g33_setup_chipset_flush();
    } else {
        intel_i915_setup_chipset_flush();
    }

    if (intel_private.ifp_resource.start)
        intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start,
                                                PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
                "can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
    if (intel_private.i9xx_flush_page)
        iounmap(intel_private.i9xx_flush_page);
//      if (intel_private.resource_valid)
//              release_resource(&intel_private.ifp_resource);
    intel_private.ifp_resource.start = 0;
    intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
    if (intel_private.i9xx_flush_page)
        writel(1, intel_private.i9xx_flush_page);
}

static void i965_write_entry(dma_addr_t addr,
                             unsigned int entry,
                             unsigned int flags)
{
    u32 pte_flags;

    pte_flags = I810_PTE_VALID;
    if (flags == AGP_USER_CACHED_MEMORY)
        pte_flags |= I830_PTE_SYSTEM_CACHED;

    /* Shift high bits down */
    addr |= (addr >> 28) & 0xf0;
    writel(addr | pte_flags, intel_private.gtt + entry);
}
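
/*
 * Worked example for the "shift high bits down" trick above, assuming a
 * 36-bit physical address: for addr = 0x230000000, (addr >> 28) & 0xf0
 * yields 0x20, i.e. address bits 35:32 (here 0x2) are folded into PTE
 * bits 7:4. The subsequent 32-bit writel() of addr | pte_flags then keeps
 * the low address bits in place and loses nothing.
 */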

static int i9xx_setup(void)
{
    phys_addr_t reg_addr;
    int size = KB(512);

    reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);

    intel_private.registers = ioremap(reg_addr, size);
    if (!intel_private.registers)
        return -ENOMEM;

    switch (INTEL_GTT_GEN) {
    case 3:
        intel_private.gtt_phys_addr =
            pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
        break;
    case 5:
        intel_private.gtt_phys_addr = reg_addr + MB(2);
        break;
    default:
        intel_private.gtt_phys_addr = reg_addr + KB(512);
        break;
    }

    intel_i9xx_setup_flush();

    return 0;
}

static const struct intel_gtt_driver i915_gtt_driver = {
    .gen = 3,
    .has_pgtbl_enable = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    /* i945 is the last gpu to need phys mem (for overlay and cursors). */
    .write_entry = i830_write_entry,
    .dma_mask_size = 32,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
    .gen = 3,
    .is_g33 = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
    .gen = 3,
    .is_pineview = 1, .is_g33 = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
    .gen = 4,
    .has_pgtbl_enable = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
    .gen = 5,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
    .gen = 5,
    .is_ironlake = 1,
    .setup = i9xx_setup,
    .cleanup = i9xx_cleanup,
    .write_entry = i965_write_entry,
    .dma_mask_size = 36,
    .check_flags = i830_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};

/* Table describing the supported Intel GMCH chipsets. find_gmch() matches
 * the integrated graphics device id against gmch_chip_id, and
 * intel_gmch_probe() then uses the corresponding gtt_driver.
 */
static const struct intel_gtt_driver_description {
    unsigned int gmch_chip_id;
    char *name;
    const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
    { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
        &i915_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
        &i965_gtt_driver },
    { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
        &g33_gtt_driver },
    { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
        &g33_gtt_driver },
    { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
        &g33_gtt_driver },
    { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
        &pineview_gtt_driver },
    { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
        &pineview_gtt_driver },
    { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
        &g4x_gtt_driver },
    { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
        "HD Graphics", &ironlake_gtt_driver },
    { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
        "HD Graphics", &ironlake_gtt_driver },
    { 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
    struct pci_dev *gmch_device;

    gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
    if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
        gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
                                     device, gmch_device);
    }

    if (!gmch_device)
        return 0;

    intel_private.pcidev = gmch_device;
    return 1;
}

int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
                     struct agp_bridge_data *bridge)
{
    int i, mask;

    /*
     * Can be called from the fake agp driver but also directly from
     * drm/i915.ko. Hence we need to check whether everything is set up
     * already.
     */
    if (intel_private.driver) {
        intel_private.refcount++;
        return 1;
    }

    for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
        if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
            intel_private.driver =
                intel_gtt_chipsets[i].gtt_driver;
            break;
        }
    }

    if (!intel_private.driver)
        return 0;

    intel_private.refcount++;

#if IS_ENABLED(CONFIG_AGP_INTEL)
    if (bridge) {
        bridge->driver = &intel_fake_agp_driver;
        bridge->dev_private_data = &intel_private;
        bridge->dev = bridge_pdev;
    }
#endif

    intel_private.bridge_dev = bridge_pdev;

    dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

    mask = intel_private.driver->dma_mask_size;
//    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//        dev_err(&intel_private.pcidev->dev,
//            "set gfx device dma mask %d-bit failed!\n", mask);
//    else
//        pci_set_consistent_dma_mask(intel_private.pcidev,
//                        DMA_BIT_MASK(mask));

    if (intel_gtt_init() != 0) {
//              intel_gmch_remove();
        return 0;
    }

    return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
                   phys_addr_t *mappable_base, unsigned long *mappable_end)
{
    *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
    *stolen_size = intel_private.stolen_size;
    *mappable_base = intel_private.gma_bus_addr;
    *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
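
/*
 * Usage sketch (illustrative): a gem driver queries the detected sizes
 * once at init, e.g.
 *
 *     size_t gtt_size, stolen;
 *     phys_addr_t gma_base;
 *     unsigned long gma_end;
 *     intel_gtt_get(&gtt_size, &stolen, &gma_base, &gma_end);
 *
 * Note the entry counts are converted back into bytes here via PAGE_SHIFT.
 */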

void intel_gtt_chipset_flush(void)
{
    if (intel_private.driver->chipset_flush)
        intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);


MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");