Subversion Repositories Kolibri OS

Rev

Rev 2327 | Rev 2332 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
  2.  */
  3. /*
  4.  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  5.  * All Rights Reserved.
  6.  *
  7.  * Permission is hereby granted, free of charge, to any person obtaining a
  8.  * copy of this software and associated documentation files (the
  9.  * "Software"), to deal in the Software without restriction, including
  10.  * without limitation the rights to use, copy, modify, merge, publish,
  11.  * distribute, sub license, and/or sell copies of the Software, and to
  12.  * permit persons to whom the Software is furnished to do so, subject to
  13.  * the following conditions:
  14.  *
  15.  * The above copyright notice and this permission notice (including the
  16.  * next paragraph) shall be included in all copies or substantial portions
  17.  * of the Software.
  18.  *
  19.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  21.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  22.  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  23.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  24.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  25.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26.  *
  27.  */
  28.  
  29. #include "drmP.h"
  30. #include "drm.h"
  31. #include "drm_crtc_helper.h"
  32. #include "drm_fb_helper.h"
  33. #include "intel_drv.h"
  34. #include "i915_drm.h"
  35. #include "i915_drv.h"
  36. #include <drm/intel-gtt.h>
  37. //#include "i915_trace.h"
  38. //#include "../../../platform/x86/intel_ips.h"
  39. #include <linux/pci.h>
  40. //#include <linux/vgaarb.h>
  41. //#include <linux/acpi.h>
  42. //#include <linux/pnp.h>
  43. //#include <linux/vga_switcheroo.h>
  44. #include <linux/slab.h>
  45. //#include <acpi/video.h>
  46.  
  47. void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
  48.  
  49. static inline int pci_read_config_dword(struct pci_dev *dev, int where,
  50.                     u32 *val)
  51. {
  52.     *val = PciRead32(dev->busnr, dev->devfn, where);
  53.     return 1;
  54. }
  55.  
  56.  
  57.  
  58. static void i915_write_hws_pga(struct drm_device *dev)
  59. {
  60.     drm_i915_private_t *dev_priv = dev->dev_private;
  61.     u32 addr;
  62.  
  63.     addr = dev_priv->status_page_dmah->busaddr;
  64.     if (INTEL_INFO(dev)->gen >= 4)
  65.         addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
  66.     I915_WRITE(HWS_PGA, addr);
  67. }
  68.  
  69. /**
  70.  * Sets up the hardware status page for devices that need a physical address
  71.  * in the register.
  72.  */
  73. static int i915_init_phys_hws(struct drm_device *dev)
  74. {
  75.     drm_i915_private_t *dev_priv = dev->dev_private;
  76.  
  77.     /* Program Hardware Status Page */
  78.     dev_priv->status_page_dmah =
  79.         drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
  80.  
  81.     if (!dev_priv->status_page_dmah) {
  82.         DRM_ERROR("Can not allocate hardware status page\n");
  83.         return -ENOMEM;
  84.     }
  85.  
  86.     i915_write_hws_pga(dev);
  87.  
  88.     dbgprintf("Enabled hardware status page\n");
  89.     return 0;
  90. }
  91.  
  92.  
  93.  
  94.  
  95.  
  96.  
  97.  
  98.  
  99.  
  100.  
/* PCI config-space offset of the MCHBAR (memory controller hub BAR)
 * register on the host bridge; the location differs by generation. */
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
/* Size of the MCHBAR MMIO window. */
#define MCHBAR_SIZE (4*4096)

/* Device-enable register on i915G/GM bridges; bit 28 gates MCHBAR. */
#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)
  107.  
  108.  
  109.  
  110.  
  111. /* Setup MCHBAR if possible, return true if we should disable it again */
  112. static void
  113. intel_setup_mchbar(struct drm_device *dev)
  114. {
  115.         drm_i915_private_t *dev_priv = dev->dev_private;
  116.         int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
  117.         u32 temp;
  118.         bool enabled;
  119.  
  120.         dev_priv->mchbar_need_disable = false;
  121.  
  122.         if (IS_I915G(dev) || IS_I915GM(dev)) {
  123.                 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
  124.                 enabled = !!(temp & DEVEN_MCHBAR_EN);
  125.         } else {
  126.                 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
  127.                 enabled = temp & 1;
  128.         }
  129.  
  130.         /* If it's already enabled, don't have to do anything */
  131.         if (enabled)
  132.                 return;
  133.  
  134.         dbgprintf("Epic fail\n");
  135.  
  136. #if 0
  137.         if (intel_alloc_mchbar_resource(dev))
  138.                 return;
  139.  
  140.         dev_priv->mchbar_need_disable = true;
  141.  
  142.         /* Space is allocated or reserved, so enable it. */
  143.         if (IS_I915G(dev) || IS_I915GM(dev)) {
  144.                 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
  145.                                        temp | DEVEN_MCHBAR_EN);
  146.         } else {
  147.                 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
  148.                 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
  149.         }
  150. #endif
  151. }
  152.  
  153.  
  154.  
  155.  
  156.  
  157.  
  158.  
  159.  
  160.  
  161.  
  162.  
  163.  
  164.  
  165.  
  166.  
  167.  
  168. static int i915_load_gem_init(struct drm_device *dev)
  169. {
  170.         struct drm_i915_private *dev_priv = dev->dev_private;
  171.         unsigned long prealloc_size, gtt_size, mappable_size;
  172.         int ret;
  173.  
  174.         prealloc_size = dev_priv->mm.gtt->stolen_size;
  175.         gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
  176.         mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
  177.  
  178.     dbgprintf("%s prealloc: %x gtt: %x mappable: %x\n",__FUNCTION__,
  179.              prealloc_size, gtt_size, mappable_size);
  180.  
  181.         /* Basic memrange allocator for stolen space */
  182.         drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
  183.  
  184.     //0xC00000 >> PAGE_SHIFT
  185.  
  186.         /* Let GEM Manage all of the aperture.
  187.          *
  188.          * However, leave one page at the end still bound to the scratch page.
  189.          * There are a number of places where the hardware apparently
  190.          * prefetches past the end of the object, and we've seen multiple
  191.          * hangs with the GPU head pointer stuck in a batchbuffer bound
  192.          * at the last page of the aperture.  One page should be enough to
  193.          * keep any prefetching inside of the aperture.
  194.          */
  195. //   i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
  196.  
  197. //   mutex_lock(&dev->struct_mutex);
  198. //   ret = i915_gem_init_ringbuffer(dev);
  199. //   mutex_unlock(&dev->struct_mutex);
  200. //   if (ret)
  201. //       return ret;
  202.  
  203.         /* Try to set up FBC with a reasonable compressed buffer size */
  204. //   if (I915_HAS_FBC(dev) && i915_powersave) {
  205. //       int cfb_size;
  206.  
  207.                 /* Leave 1M for line length buffer & misc. */
  208.  
  209.                 /* Try to get a 32M buffer... */
  210. //       if (prealloc_size > (36*1024*1024))
  211. //           cfb_size = 32*1024*1024;
  212. //       else /* fall back to 7/8 of the stolen space */
  213. //           cfb_size = prealloc_size * 7 / 8;
  214. //       i915_setup_compression(dev, cfb_size);
  215. //   }
  216.  
  217.         /* Allow hardware batchbuffers unless told otherwise. */
  218.         dev_priv->allow_batchbuffer = 1;
  219.         return 0;
  220. }
  221.  
/* Mode-setting (KMS) bring-up: parse the VBIOS, initialise the modeset
 * core, then GEM.  Returns 0 on success or a negative error code.
 * The IRQ / fbdev / vga-switcheroo stages are disabled in this port;
 * the unwind-label scaffolding below is kept so they can be re-enabled
 * in the same order as the upstream Linux driver. */
static int i915_load_modeset_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

    /* A missing VBIOS is non-fatal; we log and continue. */
    ret = intel_parse_bios(dev);
    if (ret)
        DRM_INFO("failed to find VBIOS tables\n");

//    intel_register_dsm_handler();

    /* IIR "flip pending" bit means done if this bit is set */
    if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
        dev_priv->flip_pending_is_done = true;

    intel_modeset_init(dev);

    ret = i915_load_gem_init(dev);
    if (ret)
        goto cleanup_vga_switcheroo;

#if 0

    intel_modeset_gem_init(dev);

    ret = drm_irq_install(dev);
    if (ret)
        goto cleanup_gem;

    /* Always safe in the mode setting case. */
    /* FIXME: do pre/post-mode set stuff in core KMS code */
    dev->vblank_disable_allowed = 1;

    ret = intel_fbdev_init(dev);
    if (ret)
        goto cleanup_irq;

    drm_kms_helper_poll_init(dev);

    /* We're off and running w/KMS */
    dev_priv->mm.suspended = 0;

#endif

    return 0;

    /* Unwind labels: the bodies are commented out for now, so control
     * simply falls through to the final `return ret`. */
cleanup_irq:
//    drm_irq_uninstall(dev);
cleanup_gem:
//    mutex_lock(&dev->struct_mutex);
//    i915_gem_cleanup_ringbuffer(dev);
//    mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
//    vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//    vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
    return ret;
}
  281.  
  282.  
  283.  
  284. static void i915_pineview_get_mem_freq(struct drm_device *dev)
  285. {
  286.     drm_i915_private_t *dev_priv = dev->dev_private;
  287.     u32 tmp;
  288.  
  289.     tmp = I915_READ(CLKCFG);
  290.  
  291.     switch (tmp & CLKCFG_FSB_MASK) {
  292.     case CLKCFG_FSB_533:
  293.         dev_priv->fsb_freq = 533; /* 133*4 */
  294.         break;
  295.     case CLKCFG_FSB_800:
  296.         dev_priv->fsb_freq = 800; /* 200*4 */
  297.         break;
  298.     case CLKCFG_FSB_667:
  299.         dev_priv->fsb_freq =  667; /* 167*4 */
  300.         break;
  301.     case CLKCFG_FSB_400:
  302.         dev_priv->fsb_freq = 400; /* 100*4 */
  303.         break;
  304.     }
  305.  
  306.     switch (tmp & CLKCFG_MEM_MASK) {
  307.     case CLKCFG_MEM_533:
  308.         dev_priv->mem_freq = 533;
  309.         break;
  310.     case CLKCFG_MEM_667:
  311.         dev_priv->mem_freq = 667;
  312.         break;
  313.     case CLKCFG_MEM_800:
  314.         dev_priv->mem_freq = 800;
  315.         break;
  316.     }
  317.  
  318.     /* detect pineview DDR3 setting */
  319.     tmp = I915_READ(CSHRDDR3CTL);
  320.     dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
  321. }
  322.  
  323. static void i915_ironlake_get_mem_freq(struct drm_device *dev)
  324. {
  325.     drm_i915_private_t *dev_priv = dev->dev_private;
  326.     u16 ddrpll, csipll;
  327.  
  328.     ddrpll = I915_READ16(DDRMPLL1);
  329.     csipll = I915_READ16(CSIPLL0);
  330.  
  331.     switch (ddrpll & 0xff) {
  332.     case 0xc:
  333.         dev_priv->mem_freq = 800;
  334.         break;
  335.     case 0x10:
  336.         dev_priv->mem_freq = 1066;
  337.         break;
  338.     case 0x14:
  339.         dev_priv->mem_freq = 1333;
  340.         break;
  341.     case 0x18:
  342.         dev_priv->mem_freq = 1600;
  343.         break;
  344.     default:
  345.         DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
  346.                  ddrpll & 0xff);
  347.         dev_priv->mem_freq = 0;
  348.         break;
  349.     }
  350.  
  351.     dev_priv->r_t = dev_priv->mem_freq;
  352.  
  353.     switch (csipll & 0x3ff) {
  354.     case 0x00c:
  355.         dev_priv->fsb_freq = 3200;
  356.         break;
  357.     case 0x00e:
  358.         dev_priv->fsb_freq = 3733;
  359.         break;
  360.     case 0x010:
  361.         dev_priv->fsb_freq = 4266;
  362.         break;
  363.     case 0x012:
  364.         dev_priv->fsb_freq = 4800;
  365.         break;
  366.     case 0x014:
  367.         dev_priv->fsb_freq = 5333;
  368.         break;
  369.     case 0x016:
  370.         dev_priv->fsb_freq = 5866;
  371.         break;
  372.     case 0x018:
  373.         dev_priv->fsb_freq = 6400;
  374.         break;
  375.     default:
  376.         DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
  377.                  csipll & 0x3ff);
  378.         dev_priv->fsb_freq = 0;
  379.         break;
  380.     }
  381.  
  382.     if (dev_priv->fsb_freq == 3200) {
  383.         dev_priv->c_m = 0;
  384.     } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
  385.         dev_priv->c_m = 1;
  386.     } else {
  387.         dev_priv->c_m = 2;
  388.     }
  389. }
  390.  
  391. static int i915_get_bridge_dev(struct drm_device *dev)
  392. {
  393.     struct drm_i915_private *dev_priv = dev->dev_private;
  394.  
  395.     dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
  396.     if (!dev_priv->bridge_dev) {
  397.         DRM_ERROR("bridge device not found\n");
  398.         return -1;
  399.     }
  400.     return 0;
  401. }
  402.  
  403.  
/* Global for IPS driver to get at the current i915 device.
 * Assigned (under mchdev_lock) near the end of i915_driver_load(). */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);
  415.  
  416.  
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 *
 * Returns 0 on success or a negative error code; on failure, resources
 * acquired so far are released via the goto-cleanup chain at the bottom.
 * Many upstream stages (DMA masks, MTRR, workqueue, MSI, vblank, ACPI)
 * are still commented out in this KolibriOS port.
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
    struct drm_i915_private *dev_priv;
    int ret = 0, mmio_bar;
    /* NOTE(review): agp_size is only referenced by the commented-out
     * io_mapping/MTRR code below; currently unused. */
    uint32_t agp_size;

    ENTER();

    dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
    if (dev_priv == NULL)
        return -ENOMEM;

    dev->dev_private = (void *)dev_priv;
    dev_priv->dev = dev;
    /* The probe path smuggles the intel_device_info pointer in `flags`. */
    dev_priv->info = (struct intel_device_info *) flags;

    if (i915_get_bridge_dev(dev)) {
        ret = -EIO;
        goto free_priv;
    }

    /* overlay on gen2 is broken and can't address above 1G */
//    if (IS_GEN2(dev))
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

    /* 965GM sometimes incorrectly writes to hardware status page (HWS)
     * using 32bit addressing, overwriting memory if HWS is located
     * above 4GB.
     *
     * The documentation also mentions an issue with undefined
     * behaviour if any general state is accessed within a page above 4GB,
     * which also needs to be handled carefully.
     */
//    if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

    /* The MMIO register BAR differs on gen2. */
    mmio_bar = IS_GEN2(dev) ? 1 : 0;
    dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
    if (!dev_priv->regs) {
        DRM_ERROR("failed to map registers\n");
        ret = -EIO;
        goto put_bridge;
    }

    dev_priv->mm.gtt = intel_gtt_get();
    if (!dev_priv->mm.gtt) {
        DRM_ERROR("Failed to initialize GTT\n");
        ret = -ENODEV;
        goto out_rmmap;
    }

//    agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

/*   agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;   */

//    dev_priv->mm.gtt_mapping =
//        io_mapping_create_wc(dev->agp->base, agp_size);
//    if (dev_priv->mm.gtt_mapping == NULL) {
//        ret = -EIO;
//        goto out_rmmap;
//    }

    /* Set up a WC MTRR for non-PAT systems.  This is more common than
     * one would think, because the kernel disables PAT on first
     * generation Core chips because WC PAT gets overridden by a UC
     * MTRR if present.  Even if a UC MTRR isn't present.
     */
//    dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
//                     agp_size,
//                     MTRR_TYPE_WRCOMB, 1);
//    if (dev_priv->mm.gtt_mtrr < 0) {
//        DRM_INFO("MTRR allocation failed.  Graphics "
//             "performance may suffer.\n");
//    }

    /* The i915 workqueue is primarily used for batched retirement of
     * requests (and thus managing bo) once the task has been completed
     * by the GPU. i915_gem_retire_requests() is called directly when we
     * need high-priority retirement, such as waiting for an explicit
     * bo.
     *
     * It is also used for periodic low-priority events, such as
     * idle-timers and recording error state.
     *
     * All tasks on the workqueue are expected to acquire the dev mutex
     * so there is no point in running more than one instance of the
     * workqueue at any time: max_active = 1 and NON_REENTRANT.
     */

//    dev_priv->wq = alloc_workqueue("i915",
//                       WQ_UNBOUND | WQ_NON_REENTRANT,
//                       1);
//    if (dev_priv->wq == NULL) {
//        DRM_ERROR("Failed to create our workqueue.\n");
//        ret = -ENOMEM;
//        goto out_mtrrfree;
//    }

    /* enable GEM by default */
    dev_priv->has_gem = 1;


//    intel_irq_init(dev);

    /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
    intel_setup_gmbus(dev);
    intel_opregion_setup(dev);

    /* Make sure the bios did its job and set up vital registers */
    intel_setup_bios(dev);

    i915_gem_load(dev);

    /* Init HWS */
    if (!I915_NEED_GFX_HWS(dev)) {
        ret = i915_init_phys_hws(dev);
        if (ret)
            goto out_gem_unload;
    }

    /* Record platform clock frequencies used by the power/WM code. */
    if (IS_PINEVIEW(dev))
        i915_pineview_get_mem_freq(dev);
    else if (IS_GEN5(dev))
        i915_ironlake_get_mem_freq(dev);

    /* On the 945G/GM, the chipset reports the MSI capability on the
     * integrated graphics even though the support isn't actually there
     * according to the published specs.  It doesn't appear to function
     * correctly in testing on 945G.
     * This may be a side effect of MSI having been made available for PEG
     * and the registers being closely associated.
     *
     * According to chipset errata, on the 965GM, MSI interrupts may
     * be lost or delayed, but we use them anyways to avoid
     * stuck interrupts on some machines.
     */
//    if (!IS_I945G(dev) && !IS_I945GM(dev))
//        pci_enable_msi(dev->pdev);

    spin_lock_init(&dev_priv->irq_lock);
    spin_lock_init(&dev_priv->error_lock);
    spin_lock_init(&dev_priv->rps_lock);

    /* Mobile and gen3+ parts have two display pipes. */
    if (IS_MOBILE(dev) || !IS_GEN2(dev))
        dev_priv->num_pipe = 2;
    else
        dev_priv->num_pipe = 1;

//    ret = drm_vblank_init(dev, dev_priv->num_pipe);
//    if (ret)
//        goto out_gem_unload;

    /* Start out suspended */
    dev_priv->mm.suspended = 1;

    intel_detect_pch(dev);

    ret = i915_load_modeset_init(dev);
    if (ret < 0) {
        DRM_ERROR("failed to init modeset\n");
            goto out_gem_unload;
    }

    /* Must be done after probing outputs */
//    intel_opregion_init(dev);
//    acpi_video_register();

//    setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
//            (unsigned long) dev);

    /* Publish this device to the IPS code (see i915_mch_dev above). */
    spin_lock(&mchdev_lock);
    i915_mch_dev = dev_priv;
    dev_priv->mchdev_lock = &mchdev_lock;
    spin_unlock(&mchdev_lock);

//    ips_ping_for_i915_load();

    LEAVE();

    return 0;

    /* Error unwind: fall-through releases resources in reverse order of
     * acquisition; most bodies are still commented out in this port. */
out_gem_unload:
//    if (dev_priv->mm.inactive_shrinker.shrink)
//        unregister_shrinker(&dev_priv->mm.inactive_shrinker);

//    if (dev->pdev->msi_enabled)
//        pci_disable_msi(dev->pdev);

//    intel_teardown_gmbus(dev);
//    intel_teardown_mchbar(dev);
//    destroy_workqueue(dev_priv->wq);
out_mtrrfree:
//    if (dev_priv->mm.gtt_mtrr >= 0) {
//        mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
//             dev->agp->agp_info.aper_size * 1024 * 1024);
//        dev_priv->mm.gtt_mtrr = -1;
//    }
//    io_mapping_free(dev_priv->mm.gtt_mapping);

out_rmmap:
    pci_iounmap(dev->pdev, dev_priv->regs);

put_bridge:
//    pci_dev_put(dev_priv->bridge_dev);
free_priv:
    kfree(dev_priv);
    return ret;
}
  637.  
  638.