Subversion Repositories Kolibri OS


/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <drm/intel-gtt.h>
//#include "i915_trace.h"
//#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
//#include <linux/vgaarb.h>
//#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

static inline int pci_read_config_dword(struct pci_dev *dev, int where,
                    u32 *val)
{
    *val = PciRead32(dev->busnr, dev->devfn, where);
    return 1;
}
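
/*
 * The shim above maps the Linux config-space read onto the KolibriOS
 * PciRead32() service.  Note that it returns 1 unconditionally instead of
 * the Linux 0-on-success convention; no caller in this file checks the
 * return value.  A matching write shim would look roughly like the sketch
 * below; it is illustrative only and assumes a PciWrite32() service
 * analogous to PciRead32(), which this file does not otherwise use.
 */
#if 0
static inline int pci_write_config_dword(struct pci_dev *dev, int where,
                    u32 val)
{
    PciWrite32(dev->busnr, dev->devfn, where, val);   /* assumed service */
    return 1;
}
#endif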

static void i915_write_hws_pga(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u32 addr;

    addr = dev_priv->status_page_dmah->busaddr;
    if (INTEL_INFO(dev)->gen >= 4)
        addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
    I915_WRITE(HWS_PGA, addr);
}
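
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): on
 * gen4+ the HWS_PGA write above folds bits 35:32 of the bus address into
 * register bits 7:4; the low bits are available because the status page is
 * PAGE_SIZE-aligned.
 */
#if 0
static u32 hws_pga_pack_example(u64 busaddr)
{
    u32 addr = (u32)busaddr;              /* low 32 bits, page aligned */
    addr |= (u32)(busaddr >> 28) & 0xf0;  /* bits 35:32 -> bits 7:4    */
    return addr;
}
#endif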

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    /* Program Hardware Status Page */
    dev_priv->status_page_dmah =
        drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

    if (!dev_priv->status_page_dmah) {
        DRM_ERROR("Can not allocate hardware status page\n");
        return -ENOMEM;
    }

    i915_write_hws_pga(dev);

    dbgprintf("Enabled hardware status page\n");
    return 0;
}


#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Setup MCHBAR if possible; sets dev_priv->mchbar_need_disable when the
 * caller should disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        dbgprintf("MCHBAR not enabled; enabling it is not implemented in this port\n");

#if 0
        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
#endif
}
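
/*
 * Illustrative sketch only: in the upstream driver a matching teardown
 * consumes dev_priv->mchbar_need_disable by clearing the enable bit again.
 * It is not part of this port and, like the disabled block above, assumes
 * a pci_write_config_dword() helper.
 */
#if 0
static void
intel_teardown_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;

        if (!dev_priv->mchbar_need_disable)
                return;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp & ~DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
                                       temp & ~1);
        }
}
#endif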


#define LFB_SIZE 0xC00000
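/* 0xC00000 is 12 MiB; presumably the linear-framebuffer carve-out at the
 * start of the aperture, which i915_load_gem_init() below keeps out of the
 * GEM-managed range. */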

static int i915_load_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long prealloc_size, gtt_size, mappable_size;
        int ret;

        prealloc_size = dev_priv->mm.gtt->stolen_size;
        gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
        mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

    dbgprintf("%s prealloc: %lx gtt: %lx mappable: %lx\n", __FUNCTION__,
             prealloc_size, gtt_size, mappable_size);

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

        /* Let GEM Manage all of the aperture.
         *
         * However, leave one page at the end still bound to the scratch page.
         * There are a number of places where the hardware apparently
         * prefetches past the end of the object, and we've seen multiple
         * hangs with the GPU head pointer stuck in a batchbuffer bound
         * at the last page of the aperture.  One page should be enough to
         * keep any prefetching inside of the aperture.
         */
    i915_gem_do_init(dev, LFB_SIZE, mappable_size, gtt_size - PAGE_SIZE - LFB_SIZE);
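    /*
     * Worked example with a hypothetical 256 MiB GTT (gtt_size = 0x10000000):
     * the call above passes LFB_SIZE = 0x00C00000 as the start offset and
     * gtt_size - PAGE_SIZE - LFB_SIZE = 0x0F3FF000 as the last argument, so
     * both the framebuffer carve-out at the bottom and one scratch page at
     * the top stay out of GEM's hands.
     */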

    mutex_lock(&dev->struct_mutex);
    ret = i915_gem_init_ringbuffer(dev);
    mutex_unlock(&dev->struct_mutex);
    if (ret)
        return ret;

        /* Try to set up FBC with a reasonable compressed buffer size */
//   if (I915_HAS_FBC(dev) && i915_powersave) {
//       int cfb_size;

                /* Leave 1M for line length buffer & misc. */

                /* Try to get a 32M buffer... */
//       if (prealloc_size > (36*1024*1024))
//           cfb_size = 32*1024*1024;
//       else /* fall back to 7/8 of the stolen space */
//           cfb_size = prealloc_size * 7 / 8;
//       i915_setup_compression(dev, cfb_size);
//   }

        /* Allow hardware batchbuffers unless told otherwise. */
        dev_priv->allow_batchbuffer = 1;
        return 0;
}

static int i915_load_modeset_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

    ret = intel_parse_bios(dev);
    if (ret)
        DRM_INFO("failed to find VBIOS tables\n");

//    intel_register_dsm_handler();

    /* IIR "flip pending" bit means done if this bit is set */
    if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
        dev_priv->flip_pending_is_done = true;

    intel_modeset_init(dev);

    ret = i915_load_gem_init(dev);
    if (ret)
        goto cleanup_vga_switcheroo;

    intel_modeset_gem_init(dev);

//    ret = drm_irq_install(dev);
//    if (ret)
//        goto cleanup_gem;

    /* Always safe in the mode setting case. */
    /* FIXME: do pre/post-mode set stuff in core KMS code */
    dev->vblank_disable_allowed = 1;

    ret = intel_fbdev_init(dev);
    if (ret)
        goto cleanup_irq;

//    drm_kms_helper_poll_init(dev);

    /* We're off and running w/KMS */
    dev_priv->mm.suspended = 0;

    return 0;

cleanup_irq:
//    drm_irq_uninstall(dev);
cleanup_gem:
//    mutex_lock(&dev->struct_mutex);
//    i915_gem_cleanup_ringbuffer(dev);
//    mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
//    vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//    vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
    return ret;
}


static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u32 tmp;

    tmp = I915_READ(CLKCFG);

    switch (tmp & CLKCFG_FSB_MASK) {
    case CLKCFG_FSB_533:
        dev_priv->fsb_freq = 533; /* 133*4 */
        break;
    case CLKCFG_FSB_800:
        dev_priv->fsb_freq = 800; /* 200*4 */
        break;
    case CLKCFG_FSB_667:
        dev_priv->fsb_freq = 667; /* 167*4 */
        break;
    case CLKCFG_FSB_400:
        dev_priv->fsb_freq = 400; /* 100*4 */
        break;
    }

    switch (tmp & CLKCFG_MEM_MASK) {
    case CLKCFG_MEM_533:
        dev_priv->mem_freq = 533;
        break;
    case CLKCFG_MEM_667:
        dev_priv->mem_freq = 667;
        break;
    case CLKCFG_MEM_800:
        dev_priv->mem_freq = 800;
        break;
    }

    /* detect pineview DDR3 setting */
    tmp = I915_READ(CSHRDDR3CTL);
    dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u16 ddrpll, csipll;

    ddrpll = I915_READ16(DDRMPLL1);
    csipll = I915_READ16(CSIPLL0);

    switch (ddrpll & 0xff) {
    case 0xc:
        dev_priv->mem_freq = 800;
        break;
    case 0x10:
        dev_priv->mem_freq = 1066;
        break;
    case 0x14:
        dev_priv->mem_freq = 1333;
        break;
    case 0x18:
        dev_priv->mem_freq = 1600;
        break;
    default:
        DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                 ddrpll & 0xff);
        dev_priv->mem_freq = 0;
        break;
    }

    dev_priv->r_t = dev_priv->mem_freq;

    switch (csipll & 0x3ff) {
    case 0x00c:
        dev_priv->fsb_freq = 3200;
        break;
    case 0x00e:
        dev_priv->fsb_freq = 3733;
        break;
    case 0x010:
        dev_priv->fsb_freq = 4266;
        break;
    case 0x012:
        dev_priv->fsb_freq = 4800;
        break;
    case 0x014:
        dev_priv->fsb_freq = 5333;
        break;
    case 0x016:
        dev_priv->fsb_freq = 5866;
        break;
    case 0x018:
        dev_priv->fsb_freq = 6400;
        break;
    default:
        DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                 csipll & 0x3ff);
        dev_priv->fsb_freq = 0;
        break;
    }

    if (dev_priv->fsb_freq == 3200) {
        dev_priv->c_m = 0;
    } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
        dev_priv->c_m = 1;
    } else {
        dev_priv->c_m = 2;
    }
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
    if (!dev_priv->bridge_dev) {
        DRM_ERROR("bridge device not found\n");
        return -1;
    }
    return 0;
}


/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);
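
/*
 * Illustrative sketch (hypothetical helper, not part of this port): an
 * IPS-style consumer takes mchdev_lock, checks that i915_mch_dev has been
 * published by i915_driver_load() below, and only then touches the fields
 * listed above.
 */
#if 0
static bool i915_mch_dev_available(void)
{
    bool ret;

    spin_lock(&mchdev_lock);
    ret = (i915_mch_dev != NULL);
    spin_unlock(&mchdev_lock);

    return ret;
}
#endif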


/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
    struct drm_i915_private *dev_priv;
    int ret = 0, mmio_bar;
    uint32_t agp_size;

    ENTER();

    dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
    if (dev_priv == NULL)
        return -ENOMEM;

    dev->dev_private = (void *)dev_priv;
    dev_priv->dev = dev;
    dev_priv->info = (struct intel_device_info *) flags;

    if (i915_get_bridge_dev(dev)) {
        ret = -EIO;
        goto free_priv;
    }

    /* overlay on gen2 is broken and can't address above 1G */
//    if (IS_GEN2(dev))
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

    /* 965GM sometimes incorrectly writes to hardware status page (HWS)
     * using 32bit addressing, overwriting memory if HWS is located
     * above 4GB.
     *
     * The documentation also mentions an issue with undefined
     * behaviour if any general state is accessed within a page above 4GB,
     * which also needs to be handled carefully.
     */
//    if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

    mmio_bar = IS_GEN2(dev) ? 1 : 0;
    dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
    if (!dev_priv->regs) {
        DRM_ERROR("failed to map registers\n");
        ret = -EIO;
        goto put_bridge;
    }

    dev_priv->mm.gtt = intel_gtt_get();
    if (!dev_priv->mm.gtt) {
        DRM_ERROR("Failed to initialize GTT\n");
        ret = -ENODEV;
        goto out_rmmap;
    }

//    agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

/*   agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;   */

//    dev_priv->mm.gtt_mapping =
//        io_mapping_create_wc(dev->agp->base, agp_size);
//    if (dev_priv->mm.gtt_mapping == NULL) {
//        ret = -EIO;
//        goto out_rmmap;
//    }

    /* Set up a WC MTRR for non-PAT systems.  This is more common than
     * one would think, because the kernel disables PAT on first
     * generation Core chips because WC PAT gets overridden by a UC
     * MTRR if present.  Even if a UC MTRR isn't present.
     */
//    dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
//                     agp_size,
//                     MTRR_TYPE_WRCOMB, 1);
//    if (dev_priv->mm.gtt_mtrr < 0) {
//        DRM_INFO("MTRR allocation failed.  Graphics "
//             "performance may suffer.\n");
//    }

    /* The i915 workqueue is primarily used for batched retirement of
     * requests (and thus managing bo) once the task has been completed
     * by the GPU. i915_gem_retire_requests() is called directly when we
     * need high-priority retirement, such as waiting for an explicit
     * bo.
     *
     * It is also used for periodic low-priority events, such as
     * idle-timers and recording error state.
     *
     * All tasks on the workqueue are expected to acquire the dev mutex
     * so there is no point in running more than one instance of the
     * workqueue at any time: max_active = 1 and NON_REENTRANT.
     */

//    dev_priv->wq = alloc_workqueue("i915",
//                       WQ_UNBOUND | WQ_NON_REENTRANT,
//                       1);
//    if (dev_priv->wq == NULL) {
//        DRM_ERROR("Failed to create our workqueue.\n");
//        ret = -ENOMEM;
//        goto out_mtrrfree;
//    }

    /* enable GEM by default */
    dev_priv->has_gem = 1;


//    intel_irq_init(dev);

    /* Try to make sure MCHBAR is enabled before poking at it */
    intel_setup_mchbar(dev);
    intel_setup_gmbus(dev);
    intel_opregion_setup(dev);

    /* Make sure the bios did its job and set up vital registers */
    intel_setup_bios(dev);

    i915_gem_load(dev);

    /* Init HWS */
    if (!I915_NEED_GFX_HWS(dev)) {
        ret = i915_init_phys_hws(dev);
        if (ret)
            goto out_gem_unload;
    }

    if (IS_PINEVIEW(dev))
        i915_pineview_get_mem_freq(dev);
    else if (IS_GEN5(dev))
        i915_ironlake_get_mem_freq(dev);

    /* On the 945G/GM, the chipset reports the MSI capability on the
     * integrated graphics even though the support isn't actually there
     * according to the published specs.  It doesn't appear to function
     * correctly in testing on 945G.
     * This may be a side effect of MSI having been made available for PEG
     * and the registers being closely associated.
     *
     * According to chipset errata, on the 965GM, MSI interrupts may
     * be lost or delayed, but we use them anyways to avoid
     * stuck interrupts on some machines.
     */
//    if (!IS_I945G(dev) && !IS_I945GM(dev))
//        pci_enable_msi(dev->pdev);

    spin_lock_init(&dev_priv->irq_lock);
    spin_lock_init(&dev_priv->error_lock);
    spin_lock_init(&dev_priv->rps_lock);

    if (IS_MOBILE(dev) || !IS_GEN2(dev))
        dev_priv->num_pipe = 2;
    else
        dev_priv->num_pipe = 1;

//    ret = drm_vblank_init(dev, dev_priv->num_pipe);
//    if (ret)
//        goto out_gem_unload;

    /* Start out suspended */
    dev_priv->mm.suspended = 1;

    intel_detect_pch(dev);

    ret = i915_load_modeset_init(dev);
    if (ret < 0) {
        DRM_ERROR("failed to init modeset\n");
        goto out_gem_unload;
    }

    /* Must be done after probing outputs */
//    intel_opregion_init(dev);
//    acpi_video_register();

//    setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
//            (unsigned long) dev);

    spin_lock(&mchdev_lock);
    i915_mch_dev = dev_priv;
    dev_priv->mchdev_lock = &mchdev_lock;
    spin_unlock(&mchdev_lock);

//    ips_ping_for_i915_load();

    LEAVE();

    return 0;

out_gem_unload:
//    if (dev_priv->mm.inactive_shrinker.shrink)
//        unregister_shrinker(&dev_priv->mm.inactive_shrinker);

//    if (dev->pdev->msi_enabled)
//        pci_disable_msi(dev->pdev);

//    intel_teardown_gmbus(dev);
//    intel_teardown_mchbar(dev);
//    destroy_workqueue(dev_priv->wq);
out_mtrrfree:
//    if (dev_priv->mm.gtt_mtrr >= 0) {
//        mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
//             dev->agp->agp_info.aper_size * 1024 * 1024);
//        dev_priv->mm.gtt_mtrr = -1;
//    }
//    io_mapping_free(dev_priv->mm.gtt_mapping);

out_rmmap:
    pci_iounmap(dev->pdev, dev_priv->regs);

put_bridge:
//    pci_dev_put(dev_priv->bridge_dev);
free_priv:
    kfree(dev_priv);
    return ret;
}