Subversion Repositories Kolibri OS

Rev

Rev 2325 | Rev 2327 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Intel GTT (Graphics Translation Table) routines
  3.  *
  4.  * Caveat: This driver implements the linux agp interface, but this is far from
  5.  * an agp driver! GTT support ended up here for purely historical reasons: The
  6.  * old userspace intel graphics drivers needed an interface to map memory into
  7.  * the GTT. And the drm provides a default interface for graphic devices sitting
  8.  * on an agp port. So it made sense to fake the GTT support as an agp port to
  9.  * avoid having to create a new api.
  10.  *
  11.  * With gem this does not make much sense anymore, just needlessly complicates
  12.  * the code. But as long as the old graphics stack is still supported, it's stuck
  13.  * here.
  14.  *
  15.  * /fairy-tale-mode off
  16.  */
  17.  
  18. #include <linux/module.h>
  19. #include <errno-base.h>
  20. #include <linux/pci.h>
  21. #include <linux/kernel.h>
  22. //#include <linux/pagemap.h>
  23. //#include <linux/agp_backend.h>
  24. //#include <asm/smp.h>
  25. #include <linux/spinlock.h>
  26. #include "agp.h"
  27. #include "intel-agp.h"
  28. #include "intel-gtt.h"
  29.  
  30. #include <syscall.h>
  31.  
  32. struct pci_dev *
  33. pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
  34.  
  35. static bool intel_enable_gtt(void);
  36.  
  37.  
  38. #define PCI_VENDOR_ID_INTEL             0x8086
  39. #define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
  40. #define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
  41.  
  42.  
  43. #define AGP_NORMAL_MEMORY 0
  44.  
  45. #define AGP_USER_TYPES (1 << 16)
  46. #define AGP_USER_MEMORY (AGP_USER_TYPES)
  47. #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
  48.  
  49.  
  50. static inline int pci_read_config_word(struct pci_dev *dev, int where,
  51.                     u16 *val)
  52. {
  53.     *val = PciRead16(dev->busnr, dev->devfn, where);
  54.     return 1;
  55. }
  56.  
  57. static inline int pci_read_config_dword(struct pci_dev *dev, int where,
  58.                     u32 *val)
  59. {
  60.     *val = PciRead32(dev->busnr, dev->devfn, where);
  61.     return 1;
  62. }
  63.  
  64. static inline int pci_write_config_word(struct pci_dev *dev, int where,
  65.                     u16 val)
  66. {
  67.     PciWrite16(dev->busnr, dev->devfn, where, val);
  68.     return 1;
  69. }
  70.  
  71. /*
  72.  * If we have Intel graphics, we're not going to have anything other than
  73.  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
  74.  * on the Intel IOMMU support (CONFIG_DMAR).
  75.  * Only newer chipsets need to bother with this, of course.
  76.  */
  77. #ifdef CONFIG_DMAR
  78. #define USE_PCI_DMA_API 1
  79. #else
  80. #define USE_PCI_DMA_API 0
  81. #endif
  82.  
/* Per-chipset-family operations and feature flags. One static instance
 * exists per supported family (only Sandybridge in this file); probe
 * selects it via the intel_gtt_chipsets[] table. */
struct intel_gtt_driver {
    unsigned int gen : 8;               /* hardware generation (e.g. 6 = Sandybridge) */
    unsigned int is_g33 : 1;
    unsigned int is_pineview : 1;
    unsigned int is_ironlake : 1;
    unsigned int has_pgtbl_enable : 1;  /* chipset honors I810_PGETBL_ENABLED */
    unsigned int dma_mask_size : 8;     /* gfx device DMA mask width, in bits */
    /* Chipset specific GTT setup */
    int (*setup)(void);
    /* This should undo anything done in ->setup() save the unmapping
     * of the mmio register file, that's done in the generic code. */
    void (*cleanup)(void);
    void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
    /* Flags is a more or less chipset specific opaque value.
     * For chipsets that need to support old ums (non-gem) code, this
     * needs to be identical to the various supported agp memory types! */
    bool (*check_flags)(unsigned int flags);
    void (*chipset_flush)(void);
};
  102.  
/* Singleton driver state, filled in by intel_gmch_probe()/intel_gtt_init(). */
static struct _intel_private {
    struct intel_gtt base;              /* public view, handed out by intel_gtt_get() */
    const struct intel_gtt_driver *driver;
    struct pci_dev *pcidev; /* device one */
    struct pci_dev *bridge_dev;         /* host bridge, used for GMCH config reads */
    u8 __iomem *registers;              /* mapped mmio register file */
    phys_addr_t gtt_bus_addr;           /* physical address of the GTT table */
    phys_addr_t gma_bus_addr;           /* graphics memory aperture base */
    u32 PGETBL_save;                    /* PGETBL_CTL value restored on enable/resume */
    u32 __iomem *gtt;       /* I915G */
    bool clear_fake_agp; /* on first access via agp, fill with scratch */
    int num_dcache_entries;
    void __iomem *i9xx_flush_page;      /* mapped chipset flush page (may be NULL) */
    char *i81x_gtt_table;
    struct resource ifp_resource;
    int resource_valid;
    struct page *scratch_page;
    dma_addr_t scratch_page_dma;        /* physical address of the scratch page */
} intel_private;
  122.  
  123. #define INTEL_GTT_GEN   intel_private.driver->gen
  124. #define IS_G33          intel_private.driver->is_g33
  125. #define IS_PINEVIEW     intel_private.driver->is_pineview
  126. #define IS_IRONLAKE     intel_private.driver->is_ironlake
  127. #define HAS_PGTBL_EN    intel_private.driver->has_pgtbl_enable
  128.  
  129. static int intel_gtt_setup_scratch_page(void)
  130. {
  131.     addr_t page;
  132.  
  133.     page = AllocPage();
  134.     if (page == 0)
  135.         return -ENOMEM;
  136.  
  137.     intel_private.scratch_page_dma = page;
  138.     intel_private.scratch_page = NULL;
  139.  
  140.     return 0;
  141. }
  142.  
/* Decode the amount of main memory the BIOS stole for graphics from the
 * GMCH control registers. Returns the size in bytes (0 when none or when
 * the encoding is unrecognized). On 82830/82845G the memory may instead
 * be "local" RDRAM on the chip itself. */
static unsigned int intel_gtt_stolen_size(void)
{
    u16 gmch_ctrl;
    u8 rdct;
    int local = 0;
    /* RDRAM device-technology code -> MB per device, for local memory */
    static const int ddt[4] = { 0, 16, 32, 64 };
    unsigned int stolen_size = 0;

    if (INTEL_GTT_GEN == 1)
        return 0; /* no stolen mem on i81x */

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctrl);

    if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
        intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
        case I830_GMCH_GMS_STOLEN_512:
            stolen_size = KB(512);
            break;
        case I830_GMCH_GMS_STOLEN_1024:
            stolen_size = MB(1);
            break;
        case I830_GMCH_GMS_STOLEN_8192:
            stolen_size = MB(8);
            break;
        case I830_GMCH_GMS_LOCAL:
            /* local (on-chip) RDRAM: size = (devices) * (MB per device) */
            rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
            stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                    MB(ddt[I830_RDRAM_DDT(rdct)]);
            local = 1;
            break;
        default:
            stolen_size = 0;
            break;
        }
    } else if (INTEL_GTT_GEN == 6) {
        /*
         * SandyBridge has new memory control reg at 0x50.w
         */
        u16 snb_gmch_ctl;
        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
        case SNB_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case SNB_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case SNB_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case SNB_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case SNB_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case SNB_GMCH_GMS_STOLEN_192M:
            stolen_size = MB(192);
            break;
        case SNB_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case SNB_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case SNB_GMCH_GMS_STOLEN_288M:
            stolen_size = MB(288);
            break;
        case SNB_GMCH_GMS_STOLEN_320M:
            stolen_size = MB(320);
            break;
        case SNB_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        case SNB_GMCH_GMS_STOLEN_384M:
            stolen_size = MB(384);
            break;
        case SNB_GMCH_GMS_STOLEN_416M:
            stolen_size = MB(416);
            break;
        case SNB_GMCH_GMS_STOLEN_448M:
            stolen_size = MB(448);
            break;
        case SNB_GMCH_GMS_STOLEN_480M:
            stolen_size = MB(480);
            break;
        case SNB_GMCH_GMS_STOLEN_512M:
            stolen_size = MB(512);
            break;
        }
    } else {
        /* i855 through gen4/5: stolen size encoded in the bridge's GMCH ctrl */
        switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
        case I855_GMCH_GMS_STOLEN_1M:
            stolen_size = MB(1);
            break;
        case I855_GMCH_GMS_STOLEN_4M:
            stolen_size = MB(4);
            break;
        case I855_GMCH_GMS_STOLEN_8M:
            stolen_size = MB(8);
            break;
        case I855_GMCH_GMS_STOLEN_16M:
            stolen_size = MB(16);
            break;
        case I855_GMCH_GMS_STOLEN_32M:
            stolen_size = MB(32);
            break;
        case I915_GMCH_GMS_STOLEN_48M:
            stolen_size = MB(48);
            break;
        case I915_GMCH_GMS_STOLEN_64M:
            stolen_size = MB(64);
            break;
        case G33_GMCH_GMS_STOLEN_128M:
            stolen_size = MB(128);
            break;
        case G33_GMCH_GMS_STOLEN_256M:
            stolen_size = MB(256);
            break;
        case INTEL_GMCH_GMS_STOLEN_96M:
            stolen_size = MB(96);
            break;
        case INTEL_GMCH_GMS_STOLEN_160M:
            stolen_size = MB(160);
            break;
        case INTEL_GMCH_GMS_STOLEN_224M:
            stolen_size = MB(224);
            break;
        case INTEL_GMCH_GMS_STOLEN_352M:
            stolen_size = MB(352);
            break;
        default:
            stolen_size = 0;
            break;
        }
    }

    if (stolen_size > 0) {
        dbgprintf("detected %dK %s memory\n",
               stolen_size / KB(1), local ? "local" : "stolen");
    } else {
        dbgprintf("no pre-allocated video memory detected\n");
        stolen_size = 0;
    }

    return stolen_size;
}
  292.  
  293. static void i965_adjust_pgetbl_size(unsigned int size_flag)
  294. {
  295.     u32 pgetbl_ctl, pgetbl_ctl2;
  296.  
  297.     /* ensure that ppgtt is disabled */
  298.     pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
  299.     pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
  300.     writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
  301.  
  302.     /* write the new ggtt size */
  303.     pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
  304.     pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
  305.     pgetbl_ctl |= size_flag;
  306.     writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
  307. }
  308.  
/* Return the total number of GTT entries on i965-class hardware
 * (entries = table size in bytes / 4, since each PTE is 32 bits).
 * On gen5 the GMCH control register may first force a resize of the
 * page table via i965_adjust_pgetbl_size(). */
static unsigned int i965_gtt_total_entries(void)
{
    int size;
    u32 pgetbl_ctl;
    u16 gmch_ctl;

    pci_read_config_word(intel_private.bridge_dev,
                 I830_GMCH_CTRL, &gmch_ctl);

    if (INTEL_GTT_GEN == 5) {
        /* mirror the BIOS-configured GMCH size into PGETBL_CTL */
        switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
        case G4x_GMCH_SIZE_1M:
        case G4x_GMCH_SIZE_VT_1M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
            break;
        case G4x_GMCH_SIZE_VT_1_5M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
            break;
        case G4x_GMCH_SIZE_2M:
        case G4x_GMCH_SIZE_VT_2M:
            i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
            break;
        }
    }

    pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

    switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
    case I965_PGETBL_SIZE_128KB:
        size = KB(128);
        break;
    case I965_PGETBL_SIZE_256KB:
        size = KB(256);
        break;
    case I965_PGETBL_SIZE_512KB:
        size = KB(512);
        break;
    /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
    case I965_PGETBL_SIZE_1MB:
        size = KB(1024);
        break;
    case I965_PGETBL_SIZE_2MB:
        size = KB(2048);
        break;
    case I965_PGETBL_SIZE_1_5MB:
        size = KB(1024 + 512);
        break;
    default:
        dbgprintf("unknown page table size, assuming 512KB\n");
        size = KB(512);
    }

    return size/4;
}
  363.  
  364. static unsigned int intel_gtt_total_entries(void)
  365. {
  366.     int size;
  367.  
  368.     if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
  369.         return i965_gtt_total_entries();
  370.     else if (INTEL_GTT_GEN == 6) {
  371.         u16 snb_gmch_ctl;
  372.  
  373.         pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
  374.         switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
  375.         default:
  376.         case SNB_GTT_SIZE_0M:
  377.             printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
  378.             size = MB(0);
  379.             break;
  380.         case SNB_GTT_SIZE_1M:
  381.             size = MB(1);
  382.             break;
  383.         case SNB_GTT_SIZE_2M:
  384.             size = MB(2);
  385.             break;
  386.         }
  387.         return size/4;
  388.     } else {
  389.         /* On previous hardware, the GTT size was just what was
  390.          * required to map the aperture.
  391.          */
  392.         return intel_private.base.gtt_mappable_entries;
  393.     }
  394. }
  395.  
  396. static unsigned int intel_gtt_mappable_entries(void)
  397. {
  398.     unsigned int aperture_size;
  399.  
  400.     if (INTEL_GTT_GEN == 1) {
  401.         u32 smram_miscc;
  402.  
  403.         pci_read_config_dword(intel_private.bridge_dev,
  404.                       I810_SMRAM_MISCC, &smram_miscc);
  405.  
  406.         if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
  407.                 == I810_GFX_MEM_WIN_32M)
  408.             aperture_size = MB(32);
  409.         else
  410.             aperture_size = MB(64);
  411.     } else if (INTEL_GTT_GEN == 2) {
  412.         u16 gmch_ctrl;
  413.  
  414.         pci_read_config_word(intel_private.bridge_dev,
  415.                      I830_GMCH_CTRL, &gmch_ctrl);
  416.  
  417.         if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
  418.             aperture_size = MB(64);
  419.         else
  420.             aperture_size = MB(128);
  421.     } else {
  422.         /* 9xx supports large sizes, just look at the length */
  423.         aperture_size = pci_resource_len(intel_private.pcidev, 2);
  424.     }
  425.  
  426.     return aperture_size >> PAGE_SHIFT;
  427. }
  428.  
/* Release the scratch page. Currently a stub: the page allocated in
 * intel_gtt_setup_scratch_page() is deliberately not freed here
 * (FreePage call left disabled). */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
  433.  
/* Undo intel_gtt_init(): run chipset-specific cleanup, then unmap the
 * GTT table and the mmio register file. Scratch-page teardown is
 * currently disabled, so that page is leaked on cleanup. */
static void intel_gtt_cleanup(void)
{
    intel_private.driver->cleanup();

    FreeKernelSpace(intel_private.gtt);
    FreeKernelSpace(intel_private.registers);

  //  intel_gtt_teardown_scratch_page();
}
  443.  
  444. static int intel_gtt_init(void)
  445. {
  446.     u32 gtt_map_size;
  447.     int ret;
  448.  
  449.     ENTER();
  450.  
  451.     ret = intel_private.driver->setup();
  452.     if (ret != 0)
  453.     {
  454.         LEAVE();
  455.         return ret;
  456.     };
  457.  
  458.  
  459.     intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
  460.     intel_private.base.gtt_total_entries = intel_gtt_total_entries();
  461.  
  462.     /* save the PGETBL reg for resume */
  463.     intel_private.PGETBL_save =
  464.         readl(intel_private.registers+I810_PGETBL_CTL)
  465.             & ~I810_PGETBL_ENABLED;
  466.     /* we only ever restore the register when enabling the PGTBL... */
  467.     if (HAS_PGTBL_EN)
  468.         intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
  469.  
  470.     dbgprintf("detected gtt size: %dK total, %dK mappable\n",
  471.             intel_private.base.gtt_total_entries * 4,
  472.             intel_private.base.gtt_mappable_entries * 4);
  473.  
  474.     gtt_map_size = intel_private.base.gtt_total_entries * 4;
  475.  
  476.     intel_private.gtt = (u32*)MapIoMem(intel_private.gtt_bus_addr,
  477.                     gtt_map_size, PG_SW+PG_NOCACHE);
  478.     if (!intel_private.gtt) {
  479.         intel_private.driver->cleanup();
  480.         FreeKernelSpace(intel_private.registers);
  481.         return -ENOMEM;
  482.     }
  483.  
  484.     asm volatile("wbinvd");
  485.  
  486.     intel_private.base.stolen_size = intel_gtt_stolen_size();
  487.  
  488.     intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
  489.  
  490.     ret = intel_gtt_setup_scratch_page();
  491.     if (ret != 0) {
  492.         intel_gtt_cleanup();
  493.         return ret;
  494.     }
  495.  
  496.     intel_enable_gtt();
  497.  
  498.     LEAVE();
  499.  
  500.     return 0;
  501. }
  502.  
/* Latch the graphics aperture base address and enable the GTT page
 * table. Returns true on success, false if the GMCH or PGETBL enable
 * bit failed to stick. On gen6+ only the aperture address is recorded;
 * no enable sequence is needed. */
static bool intel_enable_gtt(void)
{
    u32 gma_addr;
    u8 __iomem *reg;

    /* aperture base lives at a different config offset before gen3 */
    if (INTEL_GTT_GEN <= 2)
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
                      &gma_addr);
    else
        pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                      &gma_addr);

    intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

    if (INTEL_GTT_GEN >= 6)
        return true;

    if (INTEL_GTT_GEN == 2) {
        u16 gmch_ctrl;

        /* set the GMCH enable bit, then read back to verify it stuck */
        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        gmch_ctrl |= I830_GMCH_ENABLED;
        pci_write_config_word(intel_private.bridge_dev,
                      I830_GMCH_CTRL, gmch_ctrl);

        pci_read_config_word(intel_private.bridge_dev,
                     I830_GMCH_CTRL, &gmch_ctrl);
        if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
            dbgprintf("failed to enable the GTT: GMCH_CTRL=%x\n",
                gmch_ctrl);
            return false;
        }
    }

    /* On the resume path we may be adjusting the PGTBL value, so
     * be paranoid and flush all chipset write buffers...
     */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    reg = intel_private.registers+I810_PGETBL_CTL;
    writel(intel_private.PGETBL_save, reg);
    if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
        dbgprintf("failed to enable the GTT: PGETBL=%x [expected %x]\n",
            readl(reg), intel_private.PGETBL_save);
        return false;
    }

    /* flush again after reprogramming PGETBL_CTL */
    if (INTEL_GTT_GEN >= 3)
        writel(0, intel_private.registers+GFX_FLSH_CNTL);

    return true;
}
  557.  
  558.  
  559.  
/* Set up the chipset flush page for i9xx parts. In this port the real
 * resource allocation and ioremap are disabled (#if 0), so the flush
 * page is never mapped and i9xx_chipset_flush() stays a no-op. */
static void intel_i9xx_setup_flush(void)
{
    /* return if already configured */
    if (intel_private.ifp_resource.start)
        return;

    /* gen6 flushes through the ring/PIPE_CONTROL, not a flush page */
    if (INTEL_GTT_GEN == 6)
        return;

#if 0
    /* setup a resource for this object */
    intel_private.ifp_resource.name = "Intel Flush Page";
    intel_private.ifp_resource.flags = IORESOURCE_MEM;

    /* Setup chipset flush for 915 */
    if (IS_G33 || INTEL_GTT_GEN >= 4) {
        intel_i965_g33_setup_chipset_flush();
    } else {
        intel_i915_setup_chipset_flush();
    }

    if (intel_private.ifp_resource.start)
        intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
    if (!intel_private.i9xx_flush_page)
        dev_err(&intel_private.pcidev->dev,
            "can't ioremap flush page - no chipset flushing\n");
#endif

}
  589.  
  590. static void i9xx_chipset_flush(void)
  591. {
  592.     if (intel_private.i9xx_flush_page)
  593.         writel(1, intel_private.i9xx_flush_page);
  594. }
  595.  
  596. static bool gen6_check_flags(unsigned int flags)
  597. {
  598.     return true;
  599. }
  600.  
/* Write one gen6 GTT PTE for physical address 'addr' at index 'entry'.
 * 'flags' selects the cacheability encoding (agp memory type plus the
 * optional GFDT bit); anything not UNCACHED or LLC_MLC defaults to LLC. */
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
                 unsigned int flags)
{
    unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
    unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
    u32 pte_flags;

    if (type_mask == AGP_USER_MEMORY)
        pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
    else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
        pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
        if (gfdt)
            pte_flags |= GEN6_PTE_GFDT;
    } else { /* set 'normal'/'cached' to LLC by default */
        pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
        if (gfdt)
            pte_flags |= GEN6_PTE_GFDT;
    }

    /* gen6 has bit11-4 for physical addr bit39-32 */
    addr |= (addr >> 28) & 0xff0;
    writel(addr | pte_flags, intel_private.gtt + entry);
}
  624.  
/* Nothing chipset-specific to undo on gen6; i9xx_setup() only maps the
 * mmio register file, which the generic cleanup path unmaps. */
static void gen6_cleanup(void)
{
}
  628.  
  629. static int i9xx_setup(void)
  630. {
  631.     u32 reg_addr;
  632.  
  633.     pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
  634.  
  635.     reg_addr &= 0xfff80000;
  636.  
  637.     intel_private.registers = (u8*)MapIoMem(reg_addr, 128 * 4096, PG_SW+PG_NOCACHE);
  638.  
  639.     if (!intel_private.registers)
  640.         return -ENOMEM;
  641.  
  642.     if (INTEL_GTT_GEN == 3) {
  643.         u32 gtt_addr;
  644.  
  645.         pci_read_config_dword(intel_private.pcidev,
  646.                       I915_PTEADDR, &gtt_addr);
  647.         intel_private.gtt_bus_addr = gtt_addr;
  648.     } else {
  649.         u32 gtt_offset;
  650.  
  651.         switch (INTEL_GTT_GEN) {
  652.         case 5:
  653.         case 6:
  654.             gtt_offset = MB(2);
  655.             break;
  656.         case 4:
  657.         default:
  658.             gtt_offset =  KB(512);
  659.             break;
  660.         }
  661.         intel_private.gtt_bus_addr = reg_addr + gtt_offset;
  662.     }
  663.  
  664.     intel_i9xx_setup_flush();
  665.  
  666.     return 0;
  667. }
  668.  
/* Gen6 (Sandybridge) chipset operations; the only family this port
 * supports. 40-bit gfx DMA mask per the gen6 PTE address layout. */
static const struct intel_gtt_driver sandybridge_gtt_driver = {
    .gen = 6,
    .setup = i9xx_setup,
    .cleanup = gen6_cleanup,
    .write_entry = gen6_write_entry,
    .dma_mask_size = 40,
    .check_flags = gen6_check_flags,
    .chipset_flush = i9xx_chipset_flush,
};
  678.  
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
    unsigned int gmch_chip_id;                /* PCI device id of the IGD function */
    char *name;                               /* human-readable chipset name */
    const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
    /* all supported parts are Sandybridge variants sharing one driver */
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
        "Sandybridge", &sandybridge_gtt_driver },
    { 0, NULL, NULL }                         /* sentinel */
};
  704.  
  705. static int find_gmch(u16 device)
  706. {
  707.     struct pci_dev *gmch_device;
  708.  
  709.     gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
  710.     if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
  711.         gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
  712.                          device, gmch_device);
  713.     }
  714.  
  715.     if (!gmch_device)
  716.         return 0;
  717.  
  718.     intel_private.pcidev = gmch_device;
  719.     return 1;
  720. }
  721.  
  722. int intel_gmch_probe(struct pci_dev *pdev,
  723.                       struct agp_bridge_data *bridge)
  724. {
  725.     int i, mask;
  726.     intel_private.driver = NULL;
  727.  
  728.     for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
  729.         if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
  730.             intel_private.driver =
  731.                 intel_gtt_chipsets[i].gtt_driver;
  732.             break;
  733.         }
  734.     }
  735.  
  736.     if (!intel_private.driver)
  737.         return 0;
  738.  
  739.  //   bridge->driver = &intel_fake_agp_driver;
  740.     bridge->dev_private_data = &intel_private;
  741.     bridge->dev = pdev;
  742.  
  743.     intel_private.bridge_dev = pdev;
  744.  
  745.     dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);
  746.  
  747.     mask = intel_private.driver->dma_mask_size;
  748. //    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
  749. //        dev_err(&intel_private.pcidev->dev,
  750. //            "set gfx device dma mask %d-bit failed!\n", mask);
  751. //    else
  752. //        pci_set_consistent_dma_mask(intel_private.pcidev,
  753. //                        DMA_BIT_MASK(mask));
  754.  
  755.     /*if (bridge->driver == &intel_810_driver)
  756.         return 1;*/
  757.  
  758.     if (intel_gtt_init() != 0)
  759.         return 0;
  760.  
  761.     return 1;
  762. }
  763.  
/* Return the probed GTT description (entry counts and stolen size are
 * filled in by intel_gtt_init()). */
const struct intel_gtt *intel_gtt_get(void)
{
    return &intel_private.base;
}
  768.  
  769.