Subversion Repositories Kolibri OS

Rev

Rev 2327 | Rev 2339 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Intel GTT (Graphics Translation Table) routines
  3.  *
  4.  * Caveat: This driver implements the linux agp interface, but this is far from
  5.  * an agp driver! GTT support ended up here for purely historical reasons: The
  6.  * old userspace intel graphics drivers needed an interface to map memory into
  7.  * the GTT. And the drm provides a default interface for graphic devices sitting
  8.  * on an agp port. So it made sense to fake the GTT support as an agp port to
  9.  * avoid having to create a new api.
  10.  *
  11.  * With gem this does not make much sense anymore, just needlessly complicates
  12.  * the code. But as long as the old graphics stack is still supported, it's stuck
  13.  * here.
  14.  *
  15.  * /fairy-tale-mode off
  16.  */
  17.  
  18. #include <linux/module.h>
  19. #include <errno-base.h>
  20. #include <linux/pci.h>
  21. #include <linux/kernel.h>
  22. //#include <linux/pagemap.h>
  23. //#include <linux/agp_backend.h>
  24. //#include <asm/smp.h>
  25. #include <linux/spinlock.h>
  26. #include "agp.h"
  27. #include "intel-agp.h"
  28. #include "intel-gtt.h"
  29.  
  30. #include <syscall.h>
  31.  
  32. struct pci_dev *
  33. pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
  34.  
  35. static bool intel_enable_gtt(void);
  36.  
  37.  
  38. #define PCI_VENDOR_ID_INTEL             0x8086
  39. #define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
  40. #define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
  41.  
  42.  
  43. #define AGP_NORMAL_MEMORY 0
  44.  
  45. #define AGP_USER_TYPES (1 << 16)
  46. #define AGP_USER_MEMORY (AGP_USER_TYPES)
  47. #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
  48.  
  49.  
/* Read a 16-bit value from PCI configuration space at offset @where.
 * Thin shim over the KolibriOS PciRead16 syscall, mimicking the Linux
 * pci_read_config_word() interface.
 * NOTE(review): always returns 1, whereas Linux returns 0 on success;
 * callers in this file never check the return value, so this is benign
 * here — confirm before reusing elsewhere. */
static inline int pci_read_config_word(struct pci_dev *dev, int where,
                    u16 *val)
{
    *val = PciRead16(dev->busnr, dev->devfn, where);
    return 1;
}
  56.  
/* Read a 32-bit value from PCI configuration space at offset @where.
 * Thin shim over the KolibriOS PciRead32 syscall, mimicking the Linux
 * pci_read_config_dword() interface.
 * NOTE(review): always returns 1 (Linux convention is 0 == success);
 * return value is ignored by all callers in this file. */
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
                    u32 *val)
{
    *val = PciRead32(dev->busnr, dev->devfn, where);
    return 1;
}
  63.  
/* Write a 16-bit value to PCI configuration space at offset @where.
 * Thin shim over the KolibriOS PciWrite16 syscall, mimicking the Linux
 * pci_write_config_word() interface. Always returns 1 (ignored by
 * callers). */
static inline int pci_write_config_word(struct pci_dev *dev, int where,
                    u16 val)
{
    PciWrite16(dev->busnr, dev->devfn, where, val);
    return 1;
}
  70.  
  71. /*
  72.  * If we have Intel graphics, we're not going to have anything other than
  73.  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
  74.  * on the Intel IOMMU support (CONFIG_DMAR).
  75.  * Only newer chipsets need to bother with this, of course.
  76.  */
  77. #ifdef CONFIG_DMAR
  78. #define USE_PCI_DMA_API 1
  79. #else
  80. #define USE_PCI_DMA_API 0
  81. #endif
  82.  
/* Per-chipset vtable: every supported GMCH generation supplies its own
 * setup/cleanup and PTE-encoding routines; generic code dispatches
 * through intel_private.driver. */
struct intel_gtt_driver {
    unsigned int gen : 8;               /* chipset generation (this file only instantiates 6) */
    unsigned int is_g33 : 1;
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1;  /* PGETBL_CTL carries an enable bit to set/verify */
    unsigned int dma_mask_size : 8;     /* gfx device DMA mask width, in bits */
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    /* Encode and write one PTE: @addr is the page bus address, @entry the GTT slot index. */
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    void (*chipset_flush)(void);        /* optional: flush chipset write buffers */
};
  102.  
/* Singleton driver state, filled in by intel_gmch_probe()/intel_gtt_init()
 * and shared by every routine in this file. */
static struct _intel_private {
    struct intel_gtt base;          /* public description handed out by intel_gtt_get() */
    const struct intel_gtt_driver *driver;  /* chipset vtable selected at probe time */
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;     /* host bridge (GMCH control lives here) */
    u8 __iomem *registers;          /* mapped MMIO register file */
    phys_addr_t gtt_bus_addr;       /* bus address of the GTT itself */
    phys_addr_t gma_bus_addr;       /* bus address of the graphics aperture */
    u32 PGETBL_save;                /* PGETBL_CTL value restored on enable/resume */
    u32 __iomem *gtt;       /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;  /* mapped chipset flush page (may be NULL) */
    char *i81x_gtt_table;
    struct resource ifp_resource;   /* resource backing the flush page */
    int resource_valid;
    struct page *scratch_page;      /* NULL here; only the dma addr below is used */
    dma_addr_t scratch_page_dma;    /* physical page backing unbound GTT entries */
} intel_private;
  122.  
  123. #define INTEL_GTT_GEN   intel_private.driver->gen
  124. #define IS_G33          intel_private.driver->is_g33
  125. #define IS_PINEVIEW     intel_private.driver->is_pineview
  126. #define IS_IRONLAKE     intel_private.driver->is_ironlake
  127. #define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable
  128.  
  129. static int intel_gtt_setup_scratch_page(void)
  130. {
  131.     addr_t page;
  132.  
  133.     page = AllocPage();
  134.     if (page == 0)
  135.         return -ENOMEM;
  136.  
  137.     intel_private.scratch_page_dma = page;
  138.     intel_private.scratch_page = NULL;
  139.  
  140.     return 0;
  141. }
  142.  
/* Determine how much memory the BIOS "stole" from main RAM for graphics.
 * Decoded from the GMCH control word in the bridge's config space (or, on
 * gen6, from the new SNB control register on the gfx device itself).
 * Returns the size in bytes, 0 if none (or on i81x, which has no stolen
 * memory). */
static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;      /* set when the i830 reports dedicated (local) RDRAM */
    static const int ddt[4] = { 0, 16, 32, 64 };    /* RDRAM device density, in MB */
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctrl);

    /* i830/i845: small set of stolen sizes, plus optional local memory */
    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            /* local RDRAM: size = (channels) * (device density) */
            rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else if (INTEL_GTT_GEN == 6) {
        /*
         * SandyBridge has new memory control reg at 0x50.w
         */
        u16 snb_gmch_ctl;
        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
        case SNB_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case SNB_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case SNB_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case SNB_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case SNB_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case SNB_GMCH_GMS_STOLEN_192M:
            stolen_size = MB(192);
            break;
        case SNB_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case SNB_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case SNB_GMCH_GMS_STOLEN_288M:
            stolen_size = MB(288);
            break;
        case SNB_GMCH_GMS_STOLEN_320M:
            stolen_size = MB(320);
            break;
        case SNB_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        case SNB_GMCH_GMS_STOLEN_384M:
            stolen_size = MB(384);
            break;
        case SNB_GMCH_GMS_STOLEN_416M:
            stolen_size = MB(416);
            break;
        case SNB_GMCH_GMS_STOLEN_448M:
            stolen_size = MB(448);
            break;
        case SNB_GMCH_GMS_STOLEN_480M:
            stolen_size = MB(480);
            break;
        case SNB_GMCH_GMS_STOLEN_512M:
            stolen_size = MB(512);
            break;
        }
    } else {
        /* everything else (i855 through gen5): classic GMS encoding */
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
                dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
               stolen_size / KB(1), local ? "local" : "stolen");
    } else {
                dev_info(&intel_private.bridge_dev->dev,
                       "no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}
  293.  
  294. static void i965_adjust_pgetbl_size(unsigned int size_flag)
  295. {
  296.     u32 pgetbl_ctl, pgetbl_ctl2;
  297.  
  298.     /* ensure that ppgtt is disabled */
  299.     pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
  300.     pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
  301.     writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
  302.  
  303.     /* write the new ggtt size */
  304.     pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
  305.     pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
  306.     pgetbl_ctl |= size_flag;
  307.     writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
  308. }
  309.  
/* Read the global GTT size from PGETBL_CTL and convert it to a PTE count
 * (each entry is 4 bytes, hence the size/4 at the end).
 * On gen5 the size field is first reprogrammed from the GMCH control
 * word, since the BIOS-set value may not match the VT mode in use. */
static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        /* map the GMCH-reported size onto a PGETBL size flag */
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
                dev_info(&intel_private.pcidev->dev,
                         "unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    /* table size in bytes -> number of 4-byte entries */
    return size/4;
}
  365.  
/* Return the total number of PTEs in the global GTT, dispatching on the
 * chipset generation:
 *  - G33/gen4/gen5: read from PGETBL_CTL (i965_gtt_total_entries)
 *  - gen6 (SandyBridge): decoded from SNB_GMCH_CTRL
 *  - older: the GTT exactly covers the aperture. */
static unsigned int intel_gtt_total_entries(void)
{
    int size;

    if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
        return i965_gtt_total_entries();
    else if (INTEL_GTT_GEN == 6) {
        u16 snb_gmch_ctl;

        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
        default:
        case SNB_GTT_SIZE_0M:
            printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
            size = MB(0);
            break;
        case SNB_GTT_SIZE_1M:
            size = MB(1);
            break;
        case SNB_GTT_SIZE_2M:
            size = MB(2);
            break;
        }
        /* table size in bytes -> number of 4-byte entries */
        return size/4;
    } else {
        /* On previous hardware, the GTT size was just what was
         * required to map the aperture.
         */
        return intel_private.base.gtt_mappable_entries;
    }
}
  397.  
/* Return the number of GTT entries that are CPU-mappable through the
 * graphics aperture, i.e. aperture size in pages.
 *  - gen1 (i810): 32M or 64M window, from SMRAM_MISCC
 *  - gen2 (i830): 64M or 128M window, from GMCH_CTRL
 *  - gen3+: aperture size is simply the length of PCI BAR 2. */
static unsigned int intel_gtt_mappable_entries(void)
{
    unsigned int aperture_size;

    if (INTEL_GTT_GEN == 1) {
        u32 smram_miscc;

        pci_read_config_dword(intel_private.bridge_dev,
                      I810_SMRAM_MISCC, &smram_miscc);

        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
                == I810_GFX_MEM_WIN_32M)
            aperture_size = MB(32);
        else
            aperture_size = MB(64);
    } else if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);

        if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
            aperture_size = MB(64);
        else
            aperture_size = MB(128);
    } else {
        /* 9xx supports large sizes, just look at the length */
        aperture_size = pci_resource_len(intel_private.pcidev, 2);
    }

    /* bytes -> pages */
    return aperture_size >> PAGE_SHIFT;
}
  430.  
/* Counterpart to intel_gtt_setup_scratch_page(). Currently a no-op: the
 * scratch page is deliberately leaked.
 * NOTE(review): the FreePage() call is commented out — confirm whether
 * freeing is safe on this kernel before re-enabling it. */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
  435.  
/* Tear down what intel_gtt_init() set up: run the chipset-specific
 * cleanup hook, then unmap the GTT and the MMIO register file.
 * The scratch page teardown is intentionally disabled (see
 * intel_gtt_teardown_scratch_page()). */
static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

    FreeKernelSpace(intel_private.gtt);
    FreeKernelSpace(intel_private.registers);

  //  intel_gtt_teardown_scratch_page();
}
  445.  
  446. static int intel_gtt_init(void)
  447. {
  448.     u32 gtt_map_size;
  449.     int ret;
  450.  
  451.     ENTER();
  452.  
  453.     ret = intel_private.driver->setup();
  454.     if (ret != 0)
  455.     {
  456.         LEAVE();
  457.         return ret;
  458.     };
  459.  
  460.  
  461.     intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
  462.     intel_private.base.gtt_total_entries = intel_gtt_total_entries();
  463.  
  464.     /* save the PGETBL reg for resume */
  465.     intel_private.PGETBL_save =
  466.         readl(intel_private.registers+I810_PGETBL_CTL)
  467.             & ~I810_PGETBL_ENABLED;
  468.     /* we only ever restore the register when enabling the PGTBL... */
  469.     if (HAS_PGTBL_EN)
  470.         intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
  471.  
  472.     dbgprintf("detected gtt size: %dK total, %dK mappable\n",
  473.             intel_private.base.gtt_total_entries * 4,
  474.             intel_private.base.gtt_mappable_entries * 4);
  475.  
  476.     gtt_map_size = intel_private.base.gtt_total_entries * 4;
  477.  
  478.     intel_private.gtt = (u32*)MapIoMem(intel_private.gtt_bus_addr,
  479.                     gtt_map_size, PG_SW+PG_NOCACHE);
  480.     if (!intel_private.gtt) {
  481.         intel_private.driver->cleanup();
  482.         FreeKernelSpace(intel_private.registers);
  483.         return -ENOMEM;
  484.     }
  485.  
  486.     asm volatile("wbinvd");
  487.  
  488.     intel_private.base.stolen_size = intel_gtt_stolen_size();
  489.  
  490.     intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
  491.  
  492.     ret = intel_gtt_setup_scratch_page();
  493.     if (ret != 0) {
  494.         intel_gtt_cleanup();
  495.         return ret;
  496.     }
  497.  
  498.     intel_enable_gtt();
  499.  
  500.     LEAVE();
  501.  
  502.     return 0;
  503. }
  504.  
/* Enable the GTT and cache the aperture bus address (GMADDR BAR).
 * On gen2 the GMCH enable bit must be set (and verified) in the bridge;
 * on gen3+ PGETBL_CTL is restored from the value saved at init time and
 * the chipset write buffers are flushed around the write.
 * Returns true on success, false if the hardware refused to enable. */
static bool intel_enable_gtt(void)
{
    u32 gma_addr;
    u8 __iomem *reg;

    /* the aperture BAR moved from I810_GMADDR to I915_GMADDR on gen3+ */
    if (INTEL_GTT_GEN <= 2)
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
                      &gma_addr);
    else
        pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                      &gma_addr);

    intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

    /* gen6 needs no explicit enable beyond recording the aperture addr */
    if (INTEL_GTT_GEN >= 6)
        return true;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                      I830_GMCH_CTRL, gmch_ctrl);

        /* read back to verify the enable bit actually stuck */
        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
                        dev_err(&intel_private.pcidev->dev,
                                "failed to enable the GTT: GMCH_CTRL=%x\n",
                gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    reg = intel_private.registers+I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
                dev_err(&intel_private.pcidev->dev,
                        "failed to enable the GTT: PGETBL=%x [expected %x]\n",
            readl(reg), intel_private.PGETBL_save);
        return false;
    }

    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    return true;
}
  561.  
  562.  
  563. void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
  564.                 struct page **pages, unsigned int flags)
  565. {
  566.     int i, j;
  567.  
  568.     for (i = 0, j = first_entry; i < num_entries; i++, j++) {
  569.         dma_addr_t addr = (dma_addr_t)(pages[i]);
  570.         intel_private.driver->write_entry(addr,
  571.                           j, flags);
  572.     }
  573.     readl(intel_private.gtt+j-1);
  574. }
  575.  
  576.  
  577. void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
  578. {
  579.         unsigned int i;
  580.  
  581.         for (i = first_entry; i < (first_entry + num_entries); i++) {
  582.                 intel_private.driver->write_entry(intel_private.scratch_page_dma,
  583.                                                   i, 0);
  584.         }
  585.         readl(intel_private.gtt+i-1);
  586. }
  587.  
  588.  
/* Set up the chipset flush page for i9xx-class hardware.
 * On this port the actual resource setup and ioremap are compiled out
 * (#if 0), so i9xx_flush_page stays NULL and i9xx_chipset_flush() is
 * effectively a no-op. Gen6 never needs a flush page and bails early. */
static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    if (INTEL_GTT_GEN == 6)
        return;

#if 0
    /* setup a resource for this object */
    intel_private.ifp_resource.name = "Intel Flush Page";
    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    /* Setup chipset flush for 915 */
    if (IS_G33 || INTEL_GTT_GEN >= 4) {
        intel_i965_g33_setup_chipset_flush();
    } else {
        intel_i915_setup_chipset_flush();
    }

    if (intel_private.ifp_resource.start)
        intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
            "can't ioremap flush page - no chipset flushing\n");
#endif

}
  618.  
  619. static void i9xx_chipset_flush(void)
  620. {
  621.     if (intel_private.i9xx_flush_page)
  622.         writel(1, intel_private.i9xx_flush_page);
  623. }
  624.  
  625. static bool gen6_check_flags(unsigned int flags)
  626. {
  627.     return true;
  628. }
  629.  
/* Encode and write one gen6 (SandyBridge) PTE.
 * @addr:  page bus address; bits 39:32 are folded into PTE bits 11:4.
 * @entry: GTT slot index.
 * @flags: agp memory type, optionally OR'd with the GFDT attribute.
 * Uncached for AGP_USER_MEMORY, LLC/MLC for the LLC_MLC type, and plain
 * LLC-cached for everything else. */
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
                 unsigned int flags)
{
    unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
    unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
    u32 pte_flags;

    if (type_mask == AGP_USER_MEMORY)
        pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
    else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
        pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
        if (gfdt)
            pte_flags |= GEN6_PTE_GFDT;
    } else { /* set 'normal'/'cached' to LLC by default */
        pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
        if (gfdt)
            pte_flags |= GEN6_PTE_GFDT;
    }

    /* gen6 has bit11-4 for physical addr bit39-32 */
    addr |= (addr >> 28) & 0xff0;
    writel(addr | pte_flags, intel_private.gtt + entry);
}
  653.  
/* Nothing gen6-specific to undo: i9xx_setup() only maps the register
 * file, and the generic cleanup code unmaps that. */
static void gen6_cleanup(void)
{
}
  657.  
/* Chipset ->setup() hook for i9xx-class hardware (including gen6).
 * Maps 512K of the MMIO register BAR and computes the bus address of the
 * GTT: gen3 reads it from I915_PTEADDR; later generations locate it at a
 * fixed offset inside the register BAR (512K on gen4, 2M on gen5/6).
 * Returns 0 on success, -ENOMEM if the register file can't be mapped. */
static int i9xx_setup(void)
{
    u32 reg_addr;

    pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

    /* mask off the BAR's low/flag bits to get the base address */
    reg_addr &= 0xfff80000;

    intel_private.registers = (u8*)MapIoMem(reg_addr, 128 * 4096, PG_SW+PG_NOCACHE);

    if (!intel_private.registers)
        return -ENOMEM;

    if (INTEL_GTT_GEN == 3) {
        u32 gtt_addr;

        pci_read_config_dword(intel_private.pcidev,
                      I915_PTEADDR, &gtt_addr);
        intel_private.gtt_bus_addr = gtt_addr;
    } else {
        u32 gtt_offset;

        switch (INTEL_GTT_GEN) {
        case 5:
        case 6:
            gtt_offset = MB(2);
            break;
        case 4:
        default:
            gtt_offset =  KB(512);
            break;
        }
        intel_private.gtt_bus_addr = reg_addr + gtt_offset;
    }

    intel_i9xx_setup_flush();

    return 0;
}
  697.  
/* gen6 (SandyBridge) driver vtable — the only chipset this port wires up
 * in intel_gtt_chipsets[] below. */
static const struct intel_gtt_driver sandybridge_gtt_driver = {
    .gen = 6,
    .setup = i9xx_setup,
    .cleanup = gen6_cleanup,
    .write_entry = gen6_write_entry,
    .dma_mask_size = 40,    /* gen6 supports 40-bit gfx DMA addresses */
    .check_flags = gen6_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
  707.  
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 * This port only lists the SandyBridge IGD variants; the table is
 * terminated by an all-NULL sentinel entry. */
static const struct intel_gtt_driver_description {
    unsigned int gmch_chip_id;  /* PCI device id of the integrated gfx device */
    char *name;                 /* human-readable chipset name for logging */
    const struct intel_gtt_driver *gtt_driver;  /* vtable to use for this id */
} intel_gtt_chipsets[] = {
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { 0, NULL, NULL }
};
  733.  
  734. static int find_gmch(u16 device)
  735. {
  736.     struct pci_dev *gmch_device;
  737.  
  738.     gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
  739.     if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
  740.         gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
  741.                          device, gmch_device);
  742.     }
  743.  
  744.     if (!gmch_device)
  745.         return 0;
  746.  
  747.     intel_private.pcidev = gmch_device;
  748.     return 1;
  749. }
  750.  
/* Probe entry point: scan intel_gtt_chipsets[] for a known integrated
 * graphics device, select its driver vtable, record the bridge device,
 * and run the generic GTT init.
 * @pdev:   the host bridge device.
 * @bridge: fake agp bridge to attach our private data to.
 * Returns 1 on success, 0 when no supported chipset is found or init
 * fails (Linux agp probe convention, not 0/-errno). */
int intel_gmch_probe(struct pci_dev *pdev,
                      struct agp_bridge_data *bridge)
{
    int i, mask;
    intel_private.driver = NULL;

    for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
        if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
            intel_private.driver =
                intel_gtt_chipsets[i].gtt_driver;
            break;
        }
    }

    if (!intel_private.driver)
        return 0;

 //   bridge->driver = &intel_fake_agp_driver;
    bridge->dev_private_data = &intel_private;
    bridge->dev = pdev;

    intel_private.bridge_dev = pdev;

    dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);

    /* NOTE(review): mask is only consumed by the commented-out DMA-mask
     * setup below; it is currently unused at runtime. */
    mask = intel_private.driver->dma_mask_size;
//    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//        dev_err(&intel_private.pcidev->dev,
//            "set gfx device dma mask %d-bit failed!\n", mask);
//    else
//        pci_set_consistent_dma_mask(intel_private.pcidev,
//                        DMA_BIT_MASK(mask));

    /*if (bridge->driver == &intel_810_driver)
        return 1;*/

    if (intel_gtt_init() != 0)
        return 0;

    return 1;
}
  792.  
  793. const struct intel_gtt *intel_gtt_get(void)
  794. {
  795.     return &intel_private.base;
  796. }
  797.  
  798. void intel_gtt_chipset_flush(void)
  799. {
  800.         if (intel_private.driver->chipset_flush)
  801.                 intel_private.driver->chipset_flush();
  802. }
  803.  
  804.  
  805. phys_addr_t get_bus_addr(void)
  806. {
  807.     return intel_private.gma_bus_addr;
  808. };
  809.