
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

//#include <linux/device.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>

#include <drm/drm_crtc_helper.h>

#include <syscall.h>

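/*
 * __read_mostly is a Linux linker-section annotation; this port defines it
 * away, presumably because KolibriOS has no equivalent section.
 */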
#define __read_mostly

static struct drm_driver driver;

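/*
 * The macros below use designated initializers to fill the per-pipe,
 * per-transcoder, per-palette and per-cursor MMIO offset tables shared by
 * the intel_device_info entries in this file.
 */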
#define GEN_DEFAULT_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          CHV_PIPE_C_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           CHV_TRANSCODER_C_OFFSET, }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
                             CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

int init_display_kms(struct drm_device *dev);

extern int intel_agp_enabled;

#define PCI_VENDOR_ID_INTEL        0x8086

static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
        .gen = 4, .is_crestline = 1, .num_pipes = 2,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
        .gen = 4, .is_g4x = 1, .num_pipes = 2,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

#define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1

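/*
 * GEN7_FEATURES supplies the common gen7 defaults; the entries below may
 * override individual fields, since with designated initializers the last
 * assignment to a member wins (see the "legal, last one wins" notes).
 */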
static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
        GEN7_FEATURES,
        .is_mobile = 1,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
        GEN7_FEATURES,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .is_mobile = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_d_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
        .gen = 8, .is_mobile = 1, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
        .gen = 8, .is_mobile = 1, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
        .has_llc = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_cherryview_info = {
        .is_preliminary = 1,
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_CHV_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
        INTEL_I915G_IDS(&intel_i915g_info),     \
        INTEL_I915GM_IDS(&intel_i915gm_info),   \
        INTEL_I945G_IDS(&intel_i945g_info),     \
        INTEL_I945GM_IDS(&intel_i945gm_info),   \
        INTEL_I965G_IDS(&intel_i965g_info),     \
        INTEL_G33_IDS(&intel_g33_info),         \
        INTEL_I965GM_IDS(&intel_i965gm_info),   \
        INTEL_GM45_IDS(&intel_gm45_info),       \
        INTEL_G45_IDS(&intel_g45_info),         \
        INTEL_PINEVIEW_IDS(&intel_pineview_info),       \
        INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),   \
        INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),   \
        INTEL_SNB_D_IDS(&intel_sandybridge_d_info),     \
        INTEL_SNB_M_IDS(&intel_sandybridge_m_info),     \
        INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
        INTEL_IVB_M_IDS(&intel_ivybridge_m_info),       \
        INTEL_IVB_D_IDS(&intel_ivybridge_d_info),       \
        INTEL_HSW_D_IDS(&intel_haswell_d_info), \
        INTEL_HSW_M_IDS(&intel_haswell_m_info), \
        INTEL_VLV_M_IDS(&intel_valleyview_m_info),      \
        INTEL_VLV_D_IDS(&intel_valleyview_d_info),      \
        INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),   \
        INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),   \
        INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
        INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
        INTEL_CHV_IDS(&intel_cherryview_info)

static const struct pci_device_id pciidlist[] = {       /* aka */
        INTEL_PCI_IDS,
        {0, 0, 0}
};
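/*
 * Illustration (not the literal header text): each INTEL_*_IDS() macro from
 * <drm/i915_pciids.h> expands to PCI match entries roughly of the form
 *   { 0x8086, 0x0f30, PCI_ANY_ID, PCI_ANY_ID, 0x030000, 0xff0000,
 *     (unsigned long) &intel_valleyview_m_info },
 * so i915_pci_probe() can recover the intel_device_info from
 * ent->driver_data.
 */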

/* Only the high byte of a PCH PCI device ID identifies the PCH family; the
 * low byte varies per SKU, hence the 0xff00 mask used below.
 * INTEL_PCH_LPT_LP_DEVICE_ID_TYPE is referenced by intel_detect_pch(); the
 * guarded define covers the case where i915_drv.h does not already provide
 * it.
 */
#define INTEL_PCH_DEVICE_ID_MASK        0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE    0x8c00
#ifndef INTEL_PCH_LPT_LP_DEVICE_ID_TYPE
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#endif

void intel_detect_pch(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pch = NULL;

        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
        if (INTEL_INFO(dev)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                return;
        }

        /*
         * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough easy for the VMM, which then only
         * needs to expose an ISA bridge to let the driver know the real
         * hardware underneath. This is a requirement from the virtualization
         * team.
         *
         * In some virtualized environments (e.g. XEN), there is an irrelevant
         * ISA bridge in the system. To work reliably, we should scan through
         * all the ISA bridge devices and check for the first match, instead
         * of only checking the first one.
         */
        while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
                        unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;

                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                                WARN_ON(!IS_GEN5(dev));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(IS_ULT(dev));
                        } else if (IS_BROADWELL(dev)) {
                                dev_priv->pch_type = PCH_LPT;
                                dev_priv->pch_id =
                                        INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
                                DRM_DEBUG_KMS("This is Broadwell, assuming "
                                              "LynxPoint LP PCH\n");
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(!IS_ULT(dev));
                        } else
                                continue;

                        break;
                }
        }
        if (!pch)
                DRM_DEBUG_KMS("No PCH found.\n");

//      pci_dev_put(pch);
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                return false;

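        /* i915.semaphores is a module parameter: a negative value selects
         * the per-platform default, while 0/1 force semaphores off/on. */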
        if (i915.semaphores >= 0)
                return i915.semaphores;

        /* Until we get further testing... */
        if (IS_GEN8(dev))
                return false;

#ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

#if 0
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct drm_encoder *encoder;

        drm_modeset_lock_all(dev);
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                if (intel_encoder->suspend)
                        intel_encoder->suspend(intel_encoder);
        }
        drm_modeset_unlock_all(dev);
}

static int i915_drm_freeze(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        pci_power_t opregion_target_state;

        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
        intel_display_set_init_power(dev_priv, true);

        drm_kms_helper_poll_disable(dev);

        pci_save_state(dev->pdev);

        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                int error;

                error = i915_gem_suspend(dev);
                if (error) {
                        dev_err(&dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
                        return error;
                }

                /*
                 * Disable CRTCs directly since we want to preserve sw state
                 * for _thaw. Also, power gate the CRTC power wells.
                 */
                drm_modeset_lock_all(dev);
                for_each_crtc(dev, crtc)
                        intel_crtc_control(crtc, false);
                drm_modeset_unlock_all(dev);

                intel_dp_mst_suspend(dev);

                flush_delayed_work(&dev_priv->rps.delayed_resume_work);

                intel_runtime_pm_disable_interrupts(dev);
                intel_suspend_encoders(dev_priv);

                intel_suspend_gt_powersave(dev);

                intel_modeset_suspend_hw(dev);
        }

        i915_gem_suspend_gtt_mappings(dev);

        i915_save_state(dev);

        opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
        if (acpi_target_system_state() < ACPI_STATE_S3)
                opregion_target_state = PCI_D1;
#endif
        intel_opregion_notify_adapter(dev, opregion_target_state);

        intel_uncore_forcewake_reset(dev, false);
        intel_opregion_fini(dev);

        console_lock();
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
        console_unlock();

        dev_priv->suspend_count++;

        intel_display_set_init_power(dev_priv, false);

        return 0;
}

int i915_suspend(struct drm_device *dev, pm_message_t state)
{
        int error;

        if (!dev || !dev->dev_private) {
                DRM_ERROR("dev: %p\n", dev);
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (state.event == PM_EVENT_PRETHAW)
                return 0;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        error = i915_drm_freeze(dev);
        if (error)
                return error;

        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }

        return 0;
}

void intel_console_resume(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             console_resume_work);
        struct drm_device *dev = dev_priv->dev;

        console_lock();
        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
        console_unlock();
}

static int i915_drm_thaw_early(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                hsw_disable_pc8(dev_priv);

        intel_uncore_early_sanitize(dev, true);
        intel_uncore_sanitize(dev);
        intel_power_domains_init_hw(dev_priv);

        return 0;
}

static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET) &&
            restore_gtt_mappings) {
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
                mutex_unlock(&dev->struct_mutex);
        }

        i915_restore_state(dev);
        intel_opregion_setup(dev);

        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_init_pch_refclk(dev);
                drm_mode_config_reset(dev);

                mutex_lock(&dev->struct_mutex);
                if (i915_gem_init_hw(dev)) {
                        DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
                        atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
                }
                mutex_unlock(&dev->struct_mutex);

                intel_runtime_pm_restore_interrupts(dev);

                intel_modeset_init_hw(dev);

                {
                        unsigned long irqflags;
                        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                        if (dev_priv->display.hpd_irq_setup)
                                dev_priv->display.hpd_irq_setup(dev);
                        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
                }

                intel_dp_mst_resume(dev);
                drm_modeset_lock_all(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);

                /*
                 * ... but also need to make sure that hotplug processing
                 * doesn't cause havoc. Like in the driver load code we don't
                 * bother with the tiny race here where we might lose hotplug
                 * notifications.
                 */
                intel_hpd_init(dev);
                /* Config may have changed between suspend and resume */
                drm_helper_hpd_irq_event(dev);
        }

        intel_opregion_init(dev);

        /*
         * The console lock can be pretty contended on resume due
         * to all the printk activity.  Try to keep it out of the hot
         * path of resume if possible.
         */
        if (console_trylock()) {
                intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
                console_unlock();
        } else {
                schedule_work(&dev_priv->console_resume_work);
        }

        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_opregion_notify_adapter(dev, PCI_D0);

        return 0;
}

static int i915_drm_thaw(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                i915_check_and_clear_faults(dev);

        return __i915_drm_thaw(dev, true);
}

static int i915_resume_early(struct drm_device *dev)
{
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        /*
         * We have a resume ordering issue with the snd-hda driver also
         * requiring our device to be powered up. Due to the lack of a
         * parent/child relationship we currently solve this with an early
         * resume hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
        if (pci_enable_device(dev->pdev))
                return -EIO;

        pci_set_master(dev->pdev);

        return i915_drm_thaw_early(dev);
}

int i915_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /*
         * Platforms with opregion should have sane BIOS, older ones (gen3 and
         * earlier) need to restore the GTT mappings since the BIOS might clear
         * all our scratch PTEs.
         */
        ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
        if (ret)
                return ret;

        drm_kms_helper_poll_enable(dev);
        return 0;
}

static int i915_resume_legacy(struct drm_device *dev)
{
        i915_resume_early(dev);
        i915_resume(dev);

        return 0;
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool simulated;
        int ret;

        if (!i915.reset)
                return 0;

        mutex_lock(&dev->struct_mutex);

        i915_gem_reset(dev);

        simulated = dev_priv->gpu_error.stop_rings != 0;

        ret = intel_gpu_reset(dev);

        /* Also reset the gpu hangman. */
        if (simulated) {
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_INFO("Reset not implemented, but ignoring "
                                 "error for simulated gpu hangs\n");
                        ret = 0;
                }
        }

        if (ret) {
                DRM_ERROR("Failed to reset chip: %i\n", ret);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* Ok, now get things going again... */

        /*
         * Everything depends on having the GTT running, so we need to start
         * there.  Fortunately we don't need to do this unless we reset the
         * chip at a PCI level.
         *
         * Next we need to restore the context, but we don't use those
         * yet either...
         *
         * Ring buffer needs to be re-initialized in the KMS case, or if X
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
                        !dev_priv->ums.mm_suspended) {
                dev_priv->ums.mm_suspended = 0;

                ret = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        DRM_ERROR("Failed hw init on reset %d\n", ret);
                        return ret;
                }

                /*
                 * FIXME: This races pretty badly against concurrent holders of
                 * ring interrupts. This is possible since we've started to drop
                 * dev->struct_mutex in select places when waiting for the gpu.
                 */

                /*
                 * rps/rc6 re-init is necessary to restore state lost after the
                 * reset and the re-install of gt irqs. Skip for ironlake per
                 * previous concerns that it doesn't respond well to some forms
                 * of re-init after reset.
                 */
                if (INTEL_INFO(dev)->gen > 5)
                        intel_reset_gt_powersave(dev);

                intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }

        return 0;
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;

        if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
                DRM_INFO("This hardware requires preliminary hardware support.\n"
                         "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
                return -ENODEV;
        }

        /* Only bind to function 0 of the device. Early generations
         * used function 1 as a placeholder for multi-head. This causes
         * us confusion instead, especially on the systems where both
         * functions have the same PCI-ID!
         */
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;

        driver.driver_features &= ~(DRIVER_USE_AGP);

        return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

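/*
 * The i915_pm_* callbacks below are thin struct-device PM entry points:
 * each resolves the pci_dev/drm_device pair and forwards to the DRM-level
 * freeze/thaw helpers above. Upstream wires them into the driver's
 * dev_pm_ops table.
 */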
static int i915_pm_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        if (!drm_dev || !drm_dev->dev_private) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_freeze(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = drm_dev->dev_private;

        /*
         * We have a suspend ordering issue with the snd-hda driver also
         * requiring our device to be powered up. Due to the lack of a
         * parent/child relationship we currently solve this with a late
         * suspend hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
                hsw_enable_pc8(dev_priv);

        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}

static int i915_pm_resume_early(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        if (!drm_dev || !drm_dev->dev_private) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw_early(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_drm_thaw_early(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_drm_freeze(drm_dev);
}

static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
        hsw_enable_pc8(dev_priv);

        return 0;
}

static int snb_runtime_resume(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        intel_init_pch_refclk(dev);

        return 0;
}

static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
        hsw_disable_pc8(dev_priv);

        return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
        struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
        int i;

        /* GAM 0x4000-0x4770 */
        s->wr_watermark         = I915_READ(GEN7_WR_WATERMARK);
        s->gfx_prio_ctrl        = I915_READ(GEN7_GFX_PRIO_CTRL);
        s->arb_mode             = I915_READ(ARB_MODE);
        s->gfx_pend_tlb0        = I915_READ(GEN7_GFX_PEND_TLB0);
        s->gfx_pend_tlb1        = I915_READ(GEN7_GFX_PEND_TLB1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

        s->media_max_req_count  = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
        /* gfx_max_req_count must come from the GFX register, not the media
         * one (assumes GEN7_GFX_MAX_REQ_COUNT is defined in i915_reg.h). */
        s->gfx_max_req_count    = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

        s->render_hwsp          = I915_READ(RENDER_HWS_PGA_GEN7);
        s->ecochk               = I915_READ(GAM_ECOCHK);
        s->bsd_hwsp             = I915_READ(BSD_HWS_PGA_GEN7);
        s->blt_hwsp             = I915_READ(BLT_HWS_PGA_GEN7);

        s->tlb_rd_addr          = I915_READ(GEN7_TLB_RD_ADDR);

        /* MBC 0x9024-0x91D0, 0x8500 */
        s->g3dctl               = I915_READ(VLV_G3DCTL);
        s->gsckgctl             = I915_READ(VLV_GSCKGCTL);
        s->mbctl                = I915_READ(GEN6_MBCTL);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        s->ucgctl1              = I915_READ(GEN6_UCGCTL1);
        s->ucgctl3              = I915_READ(GEN6_UCGCTL3);
        s->rcgctl1              = I915_READ(GEN6_RCGCTL1);
        s->rcgctl2              = I915_READ(GEN6_RCGCTL2);
        s->rstctl               = I915_READ(GEN6_RSTCTL);
        s->misccpctl            = I915_READ(GEN7_MISCCPCTL);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        s->gfxpause             = I915_READ(GEN6_GFXPAUSE);
        s->rpdeuhwtc            = I915_READ(GEN6_RPDEUHWTC);
        s->rpdeuc               = I915_READ(GEN6_RPDEUC);
        s->ecobus               = I915_READ(ECOBUS);
        s->pwrdwnupctl          = I915_READ(VLV_PWRDWNUPCTL);
        s->rp_down_timeout      = I915_READ(GEN6_RP_DOWN_TIMEOUT);
        s->rp_deucsw            = I915_READ(GEN6_RPDEUCSW);
        s->rcubmabdtmr          = I915_READ(GEN6_RCUBMABDTMR);
        s->rcedata              = I915_READ(VLV_RCEDATA);
        s->spare2gh             = I915_READ(VLV_SPAREG2H);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        s->gt_imr               = I915_READ(GTIMR);
        s->gt_ier               = I915_READ(GTIER);
        s->pm_imr               = I915_READ(GEN6_PMIMR);
        s->pm_ier               = I915_READ(GEN6_PMIER);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

        /* GT SA CZ domain, 0x100000-0x138124 */
        s->tilectl              = I915_READ(TILECTL);
        s->gt_fifoctl           = I915_READ(GTFIFOCTL);
        s->gtlc_wake_ctrl       = I915_READ(VLV_GTLC_WAKE_CTRL);
        s->gtlc_survive         = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        s->pmwgicz              = I915_READ(VLV_PMWGICZ);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        s->gu_ctl0              = I915_READ(VLV_GU_CTL0);
        s->gu_ctl1              = I915_READ(VLV_GU_CTL1);
        s->clock_gate_dis2      = I915_READ(VLV_GUNIT_CLOCK_GATE2);

        /*
         * Not saving any of:
         * DFT,         0x9800-0x9EC0
         * SARB,        0xB000-0xB1FC
         * GAC,         0x5208-0x524C, 0x14000-0x14C000
         * PCI CFG
         */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
        struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
        u32 val;
        int i;

        /* GAM 0x4000-0x4770 */
        I915_WRITE(GEN7_WR_WATERMARK,   s->wr_watermark);
        I915_WRITE(GEN7_GFX_PRIO_CTRL,  s->gfx_prio_ctrl);
        I915_WRITE(ARB_MODE,            s->arb_mode | (0xffff << 16));
        I915_WRITE(GEN7_GFX_PEND_TLB0,  s->gfx_pend_tlb0);
        I915_WRITE(GEN7_GFX_PEND_TLB1,  s->gfx_pend_tlb1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

        I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
        /* As in the save path, the GFX count belongs to the GFX register. */
        I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

        I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
        I915_WRITE(GAM_ECOCHK,          s->ecochk);
        I915_WRITE(BSD_HWS_PGA_GEN7,    s->bsd_hwsp);
        I915_WRITE(BLT_HWS_PGA_GEN7,    s->blt_hwsp);

        I915_WRITE(GEN7_TLB_RD_ADDR,    s->tlb_rd_addr);

        /* MBC 0x9024-0x91D0, 0x8500 */
        I915_WRITE(VLV_G3DCTL,          s->g3dctl);
        I915_WRITE(VLV_GSCKGCTL,        s->gsckgctl);
        I915_WRITE(GEN6_MBCTL,          s->mbctl);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        I915_WRITE(GEN6_UCGCTL1,        s->ucgctl1);
        I915_WRITE(GEN6_UCGCTL3,        s->ucgctl3);
        I915_WRITE(GEN6_RCGCTL1,        s->rcgctl1);
        I915_WRITE(GEN6_RCGCTL2,        s->rcgctl2);
        I915_WRITE(GEN6_RSTCTL,         s->rstctl);
        I915_WRITE(GEN7_MISCCPCTL,      s->misccpctl);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        I915_WRITE(GEN6_GFXPAUSE,       s->gfxpause);
        I915_WRITE(GEN6_RPDEUHWTC,      s->rpdeuhwtc);
        I915_WRITE(GEN6_RPDEUC,         s->rpdeuc);
        I915_WRITE(ECOBUS,              s->ecobus);
        I915_WRITE(VLV_PWRDWNUPCTL,     s->pwrdwnupctl);
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
        I915_WRITE(GEN6_RPDEUCSW,       s->rp_deucsw);
        I915_WRITE(GEN6_RCUBMABDTMR,    s->rcubmabdtmr);
        I915_WRITE(VLV_RCEDATA,         s->rcedata);
        I915_WRITE(VLV_SPAREG2H,        s->spare2gh);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        I915_WRITE(GTIMR,               s->gt_imr);
        I915_WRITE(GTIER,               s->gt_ier);
        I915_WRITE(GEN6_PMIMR,          s->pm_imr);
        I915_WRITE(GEN6_PMIER,          s->pm_ier);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

        /* GT SA CZ domain, 0x100000-0x138124 */
        I915_WRITE(TILECTL,                     s->tilectl);
        I915_WRITE(GTFIFOCTL,                   s->gt_fifoctl);
        /*
         * Preserve the GT allow wake and GFX force clock bits; they are not
         * restored here, as they are used to control the s0ix suspend/resume
         * sequence by the caller.
         */
        val = I915_READ(VLV_GTLC_WAKE_CTRL);
        val &= VLV_GTLC_ALLOWWAKEREQ;
        val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= VLV_GFX_CLK_FORCE_ON_BIT;
        val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
        I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

        I915_WRITE(VLV_PMWGICZ,                 s->pmwgicz);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        I915_WRITE(VLV_GU_CTL0,                 s->gu_ctl0);
        I915_WRITE(VLV_GU_CTL1,                 s->gu_ctl1);
        I915_WRITE(VLV_GUNIT_CLOCK_GATE2,       s->clock_gate_dis2);
}
#endif

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
        u32 val;
        int err;

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
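        /* wait_for(COND, MS) is the i915 polling helper from intel_drv.h: it
         * re-evaluates COND until it holds or MS milliseconds elapse, and
         * returns -ETIMEDOUT on timeout. */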
        /* Wait for a previous force-off to settle */
        if (force_on) {
                err = wait_for(!COND, 20);
                if (err) {
                        DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
                                  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
                        return err;
                }
        }

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
        if (force_on)
                val |= VLV_GFX_CLK_FORCE_ON_BIT;
        I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

        if (!force_on)
                return 0;

        err = wait_for(COND, 20);
        if (err)
                DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
                          I915_READ(VLV_GTLC_SURVIVABILITY_REG));

        return err;
#undef COND
}
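/*
 * Callers pair vlv_force_gfx_clock(dev_priv, true) with a matching
 * (dev_priv, false) call around Gunit state save/restore, so the registers
 * stay clocked while being accessed; see vlv_runtime_suspend() and
 * vlv_runtime_resume() below.
 */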
#if 0
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
        u32 val;
        int err = 0;

        val = I915_READ(VLV_GTLC_WAKE_CTRL);
        val &= ~VLV_GTLC_ALLOWWAKEREQ;
        if (allow)
                val |= VLV_GTLC_ALLOWWAKEREQ;
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
        POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
              allow)
        err = wait_for(COND, 1);
        if (err)
                DRM_ERROR("timeout disabling GT waking\n");
        return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
                                 bool wait_for_on)
{
        u32 mask;
        u32 val;
        int err;

        mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
        val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
        if (COND)
                return 0;

        DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
                        wait_for_on ? "on" : "off",
                        I915_READ(VLV_GTLC_PW_STATUS));

        /*
         * RC6 transitioning can be delayed up to 2 msec (see
         * valleyview_enable_rps), use 3 msec for safety.
         */
        err = wait_for(COND, 3);
        if (err)
                DRM_ERROR("timeout waiting for GT wells to go %s\n",
                          wait_for_on ? "on" : "off");

        return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
        if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
                return;

        DRM_ERROR("GT register access while GT waking disabled\n");
        I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

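/*
 * s0ix entry: force the gfx clock on, disallow GT wake, save the Gunit
 * state, then drop the forced clock; the error labels below unwind these
 * steps in reverse order.
 */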
static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
{
        u32 mask;
        int err;

        /*
         * Bspec defines the following GT well-on flags as debug only, so
         * don't treat them as hard failures.
         */
        (void)vlv_wait_for_gt_wells(dev_priv, false);

        mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
        WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

        vlv_check_no_gt_access(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, true);
        if (err)
                goto err1;

        err = vlv_allow_gt_wake(dev_priv, false);
        if (err)
                goto err2;
        vlv_save_gunit_s0ix_state(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, false);
        if (err)
                goto err2;

        return 0;

err2:
        /* For safety always re-enable waking and disable gfx clock forcing */
        vlv_allow_gt_wake(dev_priv, true);
err1:
        vlv_force_gfx_clock(dev_priv, false);

        return err;
}

static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int err;
        int ret;

        /*
         * If any of the steps fail just try to continue, that's the best we
         * can do at this point. Return the first error code (which will also
         * leave RPM permanently disabled).
         */
        ret = vlv_force_gfx_clock(dev_priv, true);

        vlv_restore_gunit_s0ix_state(dev_priv);

        err = vlv_allow_gt_wake(dev_priv, true);
        if (!ret)
                ret = err;

        err = vlv_force_gfx_clock(dev_priv, false);
        if (!ret)
                ret = err;

        vlv_check_no_gt_access(dev_priv);

        intel_init_clock_gating(dev);
        i915_gem_restore_fences(dev);

        return ret;
}

  1365. static int intel_runtime_suspend(struct device *device)
  1366. {
  1367.         struct pci_dev *pdev = to_pci_dev(device);
  1368.         struct drm_device *dev = pci_get_drvdata(pdev);
  1369.         struct drm_i915_private *dev_priv = dev->dev_private;
  1370.         int ret;
  1371.  
  1372.         if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
  1373.                 return -ENODEV;
  1374.  
  1375.         WARN_ON(!HAS_RUNTIME_PM(dev));
  1376.         assert_force_wake_inactive(dev_priv);
  1377.  
  1378.         DRM_DEBUG_KMS("Suspending device\n");
  1379.  
  1380.         /*
  1381.          * We could deadlock here in case another thread holding struct_mutex
  1382.          * calls RPM suspend concurrently, since the RPM suspend will wait
  1383.          * first for this RPM suspend to finish. In this case the concurrent
  1384.          * RPM resume will be followed by its RPM suspend counterpart. Still,
  1385.          * for consistency, return -EAGAIN, which will reschedule this suspend.
  1386.          */
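        /*
         * Illustrative interleaving (editor's sketch, not from the original
         * source):
         *   thread A: mutex_lock(&dev->struct_mutex); ...
         *             pm_runtime_get_sync()   -> waits for B's suspend
         *   thread B: intel_runtime_suspend() -> mutex_trylock() fails
         * Returning -EAGAIN from B breaks the cycle.
         */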
  1387.         if (!mutex_trylock(&dev->struct_mutex)) {
  1388.                 DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
  1389.                 /*
  1390.                  * Bump the expiration timestamp, otherwise the suspend won't
  1391.                  * be rescheduled.
  1392.                  */
  1393.                 pm_runtime_mark_last_busy(device);
  1394.  
  1395.                 return -EAGAIN;
  1396.         }
  1397.         /*
  1398.          * We are safe here against re-faults, since the fault handler takes
  1399.          * an RPM reference.
  1400.          */
  1401.         i915_gem_release_all_mmaps(dev_priv);
  1402.         mutex_unlock(&dev->struct_mutex);
  1403.  
  1404.         /*
  1405.          * rps.work can't be rearmed here, since we get here only after making
  1406.          * sure the GPU is idle and the RPS freq is set to the minimum. See
  1407.          * intel_mark_idle().
  1408.          */
  1409.         cancel_work_sync(&dev_priv->rps.work);
  1410.         intel_runtime_pm_disable_interrupts(dev);
  1411.  
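        /*
         * Editor's note on the platform dispatch: gen6 needs no extra state
         * save on suspend (its resume path still goes through
         * snb_runtime_resume() below), while HSW/BDW and VLV have dedicated
         * handlers.
         */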
  1412.         if (IS_GEN6(dev)) {
  1413.                 ret = 0;
  1414.         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  1415.                 ret = hsw_runtime_suspend(dev_priv);
  1416.         } else if (IS_VALLEYVIEW(dev)) {
  1417.                 ret = vlv_runtime_suspend(dev_priv);
  1418.         } else {
  1419.                 ret = -ENODEV;
  1420.                 WARN_ON(1);
  1421.         }
  1422.  
  1423.         if (ret) {
  1424.                 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
  1425.                 intel_runtime_pm_restore_interrupts(dev);
  1426.  
  1427.                 return ret;
  1428.         }
  1429.  
  1430.         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
  1431.         dev_priv->pm.suspended = true;
  1432.  
  1433.         /*
  1434.          * Current versions of firmware which depend on this opregion
  1435.          * notification have repurposed the D1 definition to mean
  1436.          * "runtime suspended" rather than what you would normally expect
  1437.          * (D3), to distinguish it from notifications that might be sent
  1438.          * via the suspend path.
  1439.          */
  1440.         intel_opregion_notify_adapter(dev, PCI_D1);
  1441.  
  1442.         DRM_DEBUG_KMS("Device suspended\n");
  1443.         return 0;
  1444. }
  1445.  
  1446. static int intel_runtime_resume(struct device *device)
  1447. {
  1448.         struct pci_dev *pdev = to_pci_dev(device);
  1449.         struct drm_device *dev = pci_get_drvdata(pdev);
  1450.         struct drm_i915_private *dev_priv = dev->dev_private;
  1451.         int ret;
  1452.  
  1453.         WARN_ON(!HAS_RUNTIME_PM(dev));
  1454.  
  1455.         DRM_DEBUG_KMS("Resuming device\n");
  1456.  
  1457.         intel_opregion_notify_adapter(dev, PCI_D0);
  1458.         dev_priv->pm.suspended = false;
  1459.  
  1460.         if (IS_GEN6(dev)) {
  1461.                 ret = snb_runtime_resume(dev_priv);
  1462.         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  1463.                 ret = hsw_runtime_resume(dev_priv);
  1464.         } else if (IS_VALLEYVIEW(dev)) {
  1465.                 ret = vlv_runtime_resume(dev_priv);
  1466.         } else {
  1467.                 WARN_ON(1);
  1468.                 ret = -ENODEV;
  1469.         }
  1470.  
  1471.         /*
  1472.          * No point in rolling back things in case of an error, as the best
  1473.          * we can do is to hope that things will still work (and disable RPM).
  1474.          */
  1475.         i915_gem_init_swizzling(dev);
  1476.         gen6_update_ring_freq(dev);
  1477.  
  1478.         intel_runtime_pm_restore_interrupts(dev);
  1479.         intel_reset_gt_powersave(dev);
  1480.  
  1481.         if (ret)
  1482.                 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
  1483.         else
  1484.                 DRM_DEBUG_KMS("Device resumed\n");
  1485.  
  1486.         return ret;
  1487. }
  1488.  
  1489. static const struct dev_pm_ops i915_pm_ops = {
  1490.         .suspend = i915_pm_suspend,
  1491.         .suspend_late = i915_pm_suspend_late,
  1492.         .resume_early = i915_pm_resume_early,
  1493.         .resume = i915_pm_resume,
  1494.         .freeze = i915_pm_freeze,
  1495.         .thaw_early = i915_pm_thaw_early,
  1496.         .thaw = i915_pm_thaw,
  1497.         .poweroff = i915_pm_poweroff,
  1498.         .restore_early = i915_pm_resume_early,
  1499.         .restore = i915_pm_resume,
  1500.         .runtime_suspend = intel_runtime_suspend,
  1501.         .runtime_resume = intel_runtime_resume,
  1502. };
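
/*
 * Editor's note: hibernation restore reuses the resume callbacks
 * (restore_early/restore map to i915_pm_resume_early/i915_pm_resume), i.e.
 * coming back from an image is handled like a normal resume here.
 */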
  1503.  
  1504. static const struct vm_operations_struct i915_gem_vm_ops = {
  1505.         .fault = i915_gem_fault,
  1506.         .open = drm_gem_vm_open,
  1507.         .close = drm_gem_vm_close,
  1508. };
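
/*
 * Editor's note: i915_gem_fault takes an RPM reference (see the comment in
 * intel_runtime_suspend() above), so GTT faults can't race a runtime
 * suspend in progress.
 */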
  1509.  
  1510. static const struct file_operations i915_driver_fops = {
  1511.         .owner = THIS_MODULE,
  1512.         .open = drm_open,
  1513.         .release = drm_release,
  1514.         .unlocked_ioctl = drm_ioctl,
  1515.         .mmap = drm_gem_mmap,
  1516.         .poll = drm_poll,
  1517.         .read = drm_read,
  1518. #ifdef CONFIG_COMPAT
  1519.         .compat_ioctl = i915_compat_ioctl,
  1520. #endif
  1521.         .llseek = noop_llseek,
  1522. };
  1523. #endif
  1524.  
  1525. static struct drm_driver driver = {
  1526.     /* Don't use MTRRs here; the Xserver or userspace app should
  1527.      * deal with them for Intel hardware.
  1528.      */
  1529.     .driver_features =
  1530.             DRIVER_USE_AGP |
  1531.             DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
  1532.             DRIVER_RENDER,
  1533.     .load = i915_driver_load,
  1534. //    .unload = i915_driver_unload,
  1535.       .open = i915_driver_open,
  1536. //    .lastclose = i915_driver_lastclose,
  1537. //    .preclose = i915_driver_preclose,
  1538. //    .postclose = i915_driver_postclose,
  1539.  
  1540.     /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
  1541. //    .suspend = i915_suspend,
  1542. //    .resume = i915_resume,
  1543.  
  1544. //    .device_is_agp = i915_driver_device_is_agp,
  1545. //    .master_create = i915_master_create,
  1546. //    .master_destroy = i915_master_destroy,
  1547. #if defined(CONFIG_DEBUG_FS)
  1548.     .debugfs_init = i915_debugfs_init,
  1549.     .debugfs_cleanup = i915_debugfs_cleanup,
  1550. #endif
  1551.     .gem_free_object = i915_gem_free_object,
  1552.  
  1553. //    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
  1554. //    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
  1555. //    .gem_prime_export = i915_gem_prime_export,
  1556. //    .gem_prime_import = i915_gem_prime_import,
  1557.  
  1558. //    .dumb_create = i915_gem_dumb_create,
  1559. //    .dumb_map_offset = i915_gem_mmap_gtt,
  1560. //    .dumb_destroy = i915_gem_dumb_destroy,
  1561. //    .ioctls = i915_ioctls,
  1562. //    .fops = &i915_driver_fops,
  1563. //    .name = DRIVER_NAME,
  1564. //    .desc = DRIVER_DESC,
  1565. //    .date = DRIVER_DATE,
  1566. //    .major = DRIVER_MAJOR,
  1567. //    .minor = DRIVER_MINOR,
  1568. //    .patchlevel = DRIVER_PATCHLEVEL,
  1569. };
  1570.  
  1571.  
  1572.  
  1573.  
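/*
 * KolibriOS entry point (editor's note): instead of registering a PCI
 * driver and waiting for a probe callback, scan the PCI bus directly for a
 * supported device ID, force KMS (DRIVER_MODESET) on and hand the device to
 * the DRM core via drm_get_pci_dev().
 */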
  1574. int i915_init(void)
  1575. {
  1576.     static pci_dev_t device;
  1577.     const struct pci_device_id  *ent;
  1578.     int  err;
  1579.  
  1580.     ent = find_pci_device(&device, pciidlist);
  1581.     if( unlikely(ent == NULL) )
  1582.     {
  1583.         dbgprintf("device not found\n");
  1584.         return -ENODEV;
  1585.     }
  1586.  
  1587.     drm_core_init();
  1588.  
  1589.     DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
  1590.                                 device.pci_dev.device);
  1591.  
  1592.     driver.driver_features |= DRIVER_MODESET;
  1593.  
  1594.     err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
  1595.  
  1596.     return err;
  1597. }
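
/*
 * Usage sketch (editor's illustration; the calling site shown here is an
 * assumption, not part of this file):
 *
 *     if (i915_init() != 0)
 *         dbgprintf("i915: no supported GPU found\n");
 *
 * i915_init() returns 0 on success, or a negative errno from the PCI scan
 * or from drm_get_pci_dev().
 */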
  1598.  
  1599.  
  1600.  
  1601.