
  1. /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
  2.  */
  3. /*
  4.  *
  5.  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  6.  * All Rights Reserved.
  7.  *
  8.  * Permission is hereby granted, free of charge, to any person obtaining a
  9.  * copy of this software and associated documentation files (the
  10.  * "Software"), to deal in the Software without restriction, including
  11.  * without limitation the rights to use, copy, modify, merge, publish,
  12.  * distribute, sub license, and/or sell copies of the Software, and to
  13.  * permit persons to whom the Software is furnished to do so, subject to
  14.  * the following conditions:
  15.  *
  16.  * The above copyright notice and this permission notice (including the
  17.  * next paragraph) shall be included in all copies or substantial portions
  18.  * of the Software.
  19.  *
  20.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  21.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  22.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  23.  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  24.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  25.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  26.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  27.  *
  28.  */
  29.  
  30. //#include <linux/device.h>
  31. #include <drm/drmP.h>
  32. #include <drm/i915_drm.h>
  33. #include "i915_drv.h"
  34. #include "intel_drv.h"
  35.  
  36. #include <linux/kernel.h>
  37. #include <linux/module.h>
  38. #include <linux/mod_devicetable.h>
  39. #include <errno-base.h>
  40. #include <linux/pci.h>
  41.  
  42. #include <drm/drm_crtc_helper.h>
  43.  
  44. #include <syscall.h>
  45.  
  46. #define __read_mostly
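/* In the Linux kernel __read_mostly is a section attribute that groups
 * rarely written data; this build defines it away, so the annotations on
 * the module parameters below are kept for documentation only. */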
  47.  
  48. int init_display_kms(struct drm_device *dev);
  49.  
  50. struct drm_device *main_device;
  51.  
  52. static int i915_modeset __read_mostly = 1;
  53. MODULE_PARM_DESC(modeset,
  54.                 "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
  55.                 "1=on, -1=force vga console preference [default])");
  56.  
  57.  
  58. int i915_panel_ignore_lid __read_mostly         =  0;
  59. MODULE_PARM_DESC(panel_ignore_lid,
  60.                 "Override lid status (0=autodetect [default], 1=lid open, "
  61.                 "-1=lid closed)");
  62.  
  63. unsigned int i915_powersave  __read_mostly      =  0;
  64. MODULE_PARM_DESC(powersave,
  65.                 "Enable powersavings, fbc, downclocking, etc. (default: false)");
  66.  
  67. int i915_semaphores __read_mostly = -1;
  68.  
  69. MODULE_PARM_DESC(semaphores,
  70.                 "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
  71.  
  72. int i915_enable_rc6 __read_mostly      = 0;
  73. MODULE_PARM_DESC(i915_enable_rc6,
  74.                 "Enable power-saving render C-state 6. "
  75.                 "Different stages can be selected via bitmask values "
  76.                 "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
  77.                 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
  78.                 "default: 0 (rc6 disabled)");
  79.  
  80. int i915_enable_fbc __read_mostly      =  0;
  81. MODULE_PARM_DESC(i915_enable_fbc,
  82.                 "Enable frame buffer compression for power savings "
  83.                 "(default: 0, disabled)");
  84.  
  85. unsigned int i915_lvds_downclock  __read_mostly =  0;
  86. MODULE_PARM_DESC(lvds_downclock,
  87.                 "Use panel (LVDS/eDP) downclocking for power savings "
  88.                 "(default: false)");
  89.  
  90. int i915_lvds_channel_mode __read_mostly;
  91. MODULE_PARM_DESC(lvds_channel_mode,
  92.                  "Specify LVDS channel mode "
  93.                  "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
  94.  
  95. int i915_panel_use_ssc __read_mostly = -1;
  96. MODULE_PARM_DESC(lvds_use_ssc,
  97.                 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
  98.                 "(default: auto from VBT)");
  99.  
  100. int i915_vbt_sdvo_panel_type __read_mostly      = -1;
  101. MODULE_PARM_DESC(vbt_sdvo_panel_type,
  102.                 "Override/Ignore selection of SDVO panel mode in the VBT "
  103.                 "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
  104.  
  105. static bool i915_try_reset __read_mostly = true;
  106. MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
  107.  
  108. bool i915_enable_hangcheck __read_mostly = false;
  109. MODULE_PARM_DESC(enable_hangcheck,
  110.                 "Periodically check GPU activity for detecting hangs. "
  111.                 "WARNING: Disabling this can cause system wide hangs. "
  112.                 "(default: false)");
  113.  
  114. int i915_enable_ppgtt __read_mostly = 0;
  115. MODULE_PARM_DESC(i915_enable_ppgtt,
  116.                 "Enable PPGTT (default: false)");
  117.  
  118. unsigned int i915_preliminary_hw_support __read_mostly = 1;
  119. MODULE_PARM_DESC(preliminary_hw_support,
  120.                 "Enable preliminary hardware support. "
  121.                 "Enables Haswell and ValleyView support. "
  122.                 "(default: true)");
  123.  
  124.  
  125. #define PCI_VENDOR_ID_INTEL        0x8086
  126.  
  127. #define INTEL_VGA_DEVICE(id, info) {        \
  128.         .class = PCI_BASE_CLASS_DISPLAY << 16,  \
  129.         .class_mask = 0xff0000,                 \
  130.         .vendor = PCI_VENDOR_ID_INTEL,          \
  131.         .device = id,                           \
  132.         .subvendor = PCI_ANY_ID,                \
  133.         .subdevice = PCI_ANY_ID,                \
  134.         .driver_data = (unsigned long) info }
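/* For example, INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info) produces a
 * pci_device_id that matches vendor 0x8086, device 0x0166, any subvendor and
 * subdevice, with driver_data carrying the intel_device_info for that chip;
 * the match table below is built entirely from such entries. */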
  135.  
  136.  
  137. static const struct intel_device_info intel_i915g_info = {
  138.         .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
  139.         .has_overlay = 1, .overlay_needs_physical = 1,
  140. };
  141. static const struct intel_device_info intel_i915gm_info = {
  142.         .gen = 3, .is_mobile = 1,
  143.         .cursor_needs_physical = 1,
  144.         .has_overlay = 1, .overlay_needs_physical = 1,
  145.         .supports_tv = 1,
  146. };
  147. static const struct intel_device_info intel_i945g_info = {
  148.         .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
  149.         .has_overlay = 1, .overlay_needs_physical = 1,
  150. };
  151. static const struct intel_device_info intel_i945gm_info = {
  152.         .gen = 3, .is_i945gm = 1, .is_mobile = 1,
  153.         .has_hotplug = 1, .cursor_needs_physical = 1,
  154.         .has_overlay = 1, .overlay_needs_physical = 1,
  155.         .supports_tv = 1,
  156. };
  157.  
  158. static const struct intel_device_info intel_i965g_info = {
  159.         .gen = 4, .is_broadwater = 1,
  160.         .has_hotplug = 1,
  161.         .has_overlay = 1,
  162. };
  163.  
  164. static const struct intel_device_info intel_i965gm_info = {
  165.         .gen = 4, .is_crestline = 1,
  166.         .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
  167.         .has_overlay = 1,
  168.         .supports_tv = 1,
  169. };
  170.  
  171. static const struct intel_device_info intel_g33_info = {
  172.         .gen = 3, .is_g33 = 1,
  173.         .need_gfx_hws = 1, .has_hotplug = 1,
  174.         .has_overlay = 1,
  175. };
  176.  
  177. static const struct intel_device_info intel_g45_info = {
  178.         .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
  179.         .has_pipe_cxsr = 1, .has_hotplug = 1,
  180.         .has_bsd_ring = 1,
  181. };
  182.  
  183. static const struct intel_device_info intel_gm45_info = {
  184.         .gen = 4, .is_g4x = 1,
  185.         .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
  186.         .has_pipe_cxsr = 1, .has_hotplug = 1,
  187.         .supports_tv = 1,
  188.         .has_bsd_ring = 1,
  189. };
  190.  
  191. static const struct intel_device_info intel_pineview_info = {
  192.         .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
  193.         .need_gfx_hws = 1, .has_hotplug = 1,
  194.         .has_overlay = 1,
  195. };
  196.  
  197. static const struct intel_device_info intel_ironlake_d_info = {
  198.         .gen = 5,
  199.         .need_gfx_hws = 1, .has_hotplug = 1,
  200.         .has_bsd_ring = 1,
  201. };
  202.  
  203. static const struct intel_device_info intel_ironlake_m_info = {
  204.         .gen = 5, .is_mobile = 1,
  205.         .need_gfx_hws = 1, .has_hotplug = 1,
  206.         .has_fbc = 1,
  207.         .has_bsd_ring = 1,
  208. };
  209.  
  210. static const struct intel_device_info intel_sandybridge_d_info = {
  211.         .gen = 6,
  212.         .need_gfx_hws = 1, .has_hotplug = 1,
  213.         .has_bsd_ring = 1,
  214.         .has_blt_ring = 1,
  215.         .has_llc = 1,
  216.         .has_force_wake = 1,
  217. };
  218.  
  219. static const struct intel_device_info intel_sandybridge_m_info = {
  220.         .gen = 6, .is_mobile = 1,
  221.         .need_gfx_hws = 1, .has_hotplug = 1,
  222.         .has_fbc = 1,
  223.         .has_bsd_ring = 1,
  224.         .has_blt_ring = 1,
  225.         .has_llc = 1,
  226.         .has_force_wake = 1,
  227. };
  228.  
  229. static const struct intel_device_info intel_ivybridge_d_info = {
  230.         .is_ivybridge = 1, .gen = 7,
  231.         .need_gfx_hws = 1, .has_hotplug = 1,
  232.         .has_bsd_ring = 1,
  233.         .has_blt_ring = 1,
  234.         .has_llc = 1,
  235.         .has_force_wake = 1,
  236. };
  237.  
  238. static const struct intel_device_info intel_ivybridge_m_info = {
  239.         .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
  240.         .need_gfx_hws = 1, .has_hotplug = 1,
  241.         .has_fbc = 0,   /* FBC is not enabled on Ivybridge mobile yet */
  242.         .has_bsd_ring = 1,
  243.         .has_blt_ring = 1,
  244.         .has_llc = 1,
  245.         .has_force_wake = 1,
  246. };
  247.  
  248. static const struct intel_device_info intel_valleyview_m_info = {
  249.         .gen = 7, .is_mobile = 1,
  250.         .need_gfx_hws = 1, .has_hotplug = 1,
  251.         .has_fbc = 0,
  252.         .has_bsd_ring = 1,
  253.         .has_blt_ring = 1,
  254.         .is_valleyview = 1,
  255. };
  256.  
  257. static const struct intel_device_info intel_valleyview_d_info = {
  258.         .gen = 7,
  259.         .need_gfx_hws = 1, .has_hotplug = 1,
  260.         .has_fbc = 0,
  261.         .has_bsd_ring = 1,
  262.         .has_blt_ring = 1,
  263.         .is_valleyview = 1,
  264. };
  265.  
  266. static const struct intel_device_info intel_haswell_d_info = {
  267.         .is_haswell = 1, .gen = 7,
  268.         .need_gfx_hws = 1, .has_hotplug = 1,
  269.         .has_bsd_ring = 1,
  270.         .has_blt_ring = 1,
  271.         .has_llc = 1,
  272.         .has_force_wake = 1,
  273. };
  274.  
  275. static const struct intel_device_info intel_haswell_m_info = {
  276.         .is_haswell = 1, .gen = 7, .is_mobile = 1,
  277.         .need_gfx_hws = 1, .has_hotplug = 1,
  278.         .has_bsd_ring = 1,
  279.         .has_blt_ring = 1,
  280.         .has_llc = 1,
  281.         .has_force_wake = 1,
  282. };
  283.  
  284. static const struct pci_device_id pciidlist[] = {       /* aka */
  285.         INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),            /* I915_G */
  286.         INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),            /* E7221_G */
  287.         INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),           /* I915_GM */
  288.         INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),            /* I945_G */
  289.         INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),           /* I945_GM */
  290.         INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),           /* I945_GME */
  291.         INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),            /* I946_GZ */
  292.         INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),            /* G35_G */
  293.         INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),            /* I965_Q */
  294.         INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),            /* I965_G */
  295.         INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),              /* Q35_G */
  296.         INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),              /* G33_G */
  297.         INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),              /* Q33_G */
  298.         INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),           /* I965_GM */
  299.         INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),           /* I965_GME */
  300.         INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),             /* GM45_G */
  301.         INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),              /* IGD_E_G */
  302.         INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),              /* Q45_G */
  303.         INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),              /* G45_G */
  304.         INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),              /* G41_G */
  305.         INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),              /* B43_G */
  306.         INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),              /* B43_G.1 */
  307.         INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
  308.         INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
  309.         INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
  310.         INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
  311.         INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
  312.         INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
  313.         INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
  314.         INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
  315.         INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
  316.         INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
  317.         INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
  318.         INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
  319.         INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
  320.         INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
  321.         INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
  322.         INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
  323.         INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
  324.         INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
  325.         INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
  326.         INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
  327.         INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
  328.         INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
  329.         INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
  330.         INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
  331.         INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
  332.         INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
  333.         INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
  334.         INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
  335.         INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
  336.         INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
  337.         INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
  338.         INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
  339.         INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
  340.         INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
  341.         INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
  342.         INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
  343.         INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
  344.         INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
  345.         INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
  346.         INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
  347.         INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
  348.         INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
  349.         INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
  350.         INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
  351.         INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
  352.         INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
  353.         INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
  354.         INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
  355.         INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
  356.         INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
  357.         INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
  358.         INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
  359.         INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
  360.         INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
  361.         INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
  362.         INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
  363.         {0, 0, 0}
  364. };
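/* The all-zero entry terminates the table.  i915_init() below walks this
 * list with find_pci_device() to locate the first supported GPU, and the
 * matching driver_data is what gates off preliminary hardware. */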
  365.  
  366. #define INTEL_PCH_DEVICE_ID_MASK        0xff00
  367. #define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
  368. #define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
  369. #define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
  370. #define INTEL_PCH_LPT_DEVICE_ID_TYPE    0x8c00
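/* intel_detect_pch() below also tests INTEL_PCH_LPT_LP_DEVICE_ID_TYPE, which
 * is not defined in this block.  The upstream Linux driver uses 0x9c00 for
 * the LynxPoint LP PCH; assuming the same value here: */
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00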
  371.  
  372. void intel_detect_pch(struct drm_device *dev)
  373. {
  374.         struct drm_i915_private *dev_priv = dev->dev_private;
  375.         struct pci_dev *pch;
  376.  
  377.         /*
  378.          * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
  379.          * make graphics device passthrough work easy for the VMM, which only
  380.          * needs to expose the ISA bridge to let the driver know the real
  381.          * hardware underneath. This is a requirement from the virtualization team.
  382.          */
  383.         pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
  384.         if (pch) {
  385.                 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
  386.                         unsigned short id;
  387.                         id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
  388.                         dev_priv->pch_id = id;
  389.  
  390.                         if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
  391.                                 dev_priv->pch_type = PCH_IBX;
  392.                                 dev_priv->num_pch_pll = 2;
  393.                                 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
  394.                                 WARN_ON(!IS_GEN5(dev));
  395.                         } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
  396.                                 dev_priv->pch_type = PCH_CPT;
  397.                                 dev_priv->num_pch_pll = 2;
  398.                                 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
  399.                                 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
  400.                         } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
  401.                                 /* PantherPoint is CPT compatible */
  402.                                 dev_priv->pch_type = PCH_CPT;
  403.                                 dev_priv->num_pch_pll = 2;
  404.                                 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
  405.                                 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
  406.                         } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
  407.                                 dev_priv->pch_type = PCH_LPT;
  408.                                 dev_priv->num_pch_pll = 0;
  409.                                 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
  410.                                 WARN_ON(!IS_HASWELL(dev));
  411.                         } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
  412.                                 dev_priv->pch_type = PCH_LPT;
  413.                                 dev_priv->num_pch_pll = 0;
  414.                                 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
  415.                                 WARN_ON(!IS_HASWELL(dev));
  416.                         }
  417.                         BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
  418.                 }
  419.         }
  420. }
  421.  
  422. bool i915_semaphore_is_enabled(struct drm_device *dev)
  423. {
  424.         if (INTEL_INFO(dev)->gen < 6)
  425.                 return false;
  426.  
  427.         if (i915_semaphores >= 0)
  428.                 return i915_semaphores;
  429.  
  430. #ifdef CONFIG_INTEL_IOMMU
  431.         /* Enable semaphores on SNB when IO remapping is off */
  432.         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
  433.                 return false;
  434. #endif
  435.  
  436.         return true;
  437. }
  438.  
  439.  
  440.  
  441.  
  442.  
  443. int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent);
  444.  
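/* Driver entry point for this port: scan the PCI bus for the first device
 * that matches pciidlist, reject preliminary (ValleyView) hardware unless
 * i915_preliminary_hw_support is set, initialise AGP for gen3 parts, then
 * hand the device to drm_get_dev(). */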
  445. int i915_init(void)
  446. {
  447.     static pci_dev_t device;
  448.     const struct pci_device_id  *ent;
  449.     int  err;
  450.  
  451.     ent = find_pci_device(&device, pciidlist);
  452.     if (unlikely(ent == NULL))
  453.     {
  454.         dbgprintf("device not found\n");
  455.         return 0;
  456.     }
  457.  
  458.     struct intel_device_info *intel_info =
  459.         (struct intel_device_info *) ent->driver_data;
  460.  
  461.     if (intel_info->is_valleyview &&
  462.         !i915_preliminary_hw_support) {
  463.         DRM_ERROR("Preliminary hardware support disabled\n");
  464.         return -ENODEV;
  465.     }
  466.  
  467.     DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
  468.                                 device.pci_dev.device);
  469.  
  470.     if (intel_info->gen != 3) {
  471.  
  472.     } else if (init_agp() != 0) {
  473.         DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
  474.         return -ENODEV;
  475.     }
  476.  
  477.     err = drm_get_dev(&device.pci_dev, ent);
  478.  
  479.     return err;
  480. }
  481.  
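/* Reduced stand-in for the Linux drm_get_pci_dev(): allocate a drm_device,
 * record the PCI identity, set up the lists and locks the DRM core expects,
 * then run i915_driver_load() and the port's KMS init.  The commented-out
 * steps from the original Linux path are skipped in this port. */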
  482. int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
  483. {
  484.     struct drm_device *dev;
  485.     static struct drm_driver driver;
  486.  
  487.     int ret;
  488.  
  489.     dev = kzalloc(sizeof(*dev), 0);
  490.     if (!dev)
  491.         return -ENOMEM;
  492.  
  493.  //   ret = pci_enable_device(pdev);
  494.  //   if (ret)
  495.  //       goto err_g1;
  496.  
  497.     pci_set_master(pdev);
  498.  
  499.  //   if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
  500.  //       printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
  501.  //       goto err_g2;
  502.  //   }
  503.  
  504.     dev->pdev = pdev;
  505.     dev->pci_device = pdev->device;
  506.     dev->pci_vendor = pdev->vendor;
  507.  
  508.     INIT_LIST_HEAD(&dev->filelist);
  509.     INIT_LIST_HEAD(&dev->ctxlist);
  510.     INIT_LIST_HEAD(&dev->vmalist);
  511.     INIT_LIST_HEAD(&dev->maplist);
  512.  
  513.     spin_lock_init(&dev->count_lock);
  514.     mutex_init(&dev->struct_mutex);
  515.     mutex_init(&dev->ctxlist_mutex);
  516.  
  517.     dev->driver = &driver;
  518.  
  519.     ret = i915_driver_load(dev, ent->driver_data );
  520.  
  521.     if (ret)
  522.         goto err_g4;
  523.  
  524.     ret = init_display_kms(dev);
  525.  
  526.     if (ret)
  527.         goto err_g4;
  528.  
  529.     return 0;
  530.  
  531. err_g4:
  532. //    drm_put_minor(&dev->primary);
  533. //err_g3:
  534. //    if (drm_core_check_feature(dev, DRIVER_MODESET))
  535. //        drm_put_minor(&dev->control);
  536. //err_g2:
  537. //    pci_disable_device(pdev);
  538. //err_g1:
  539.     free(dev);
  540.  
  541.     return ret;
  542. }
  543.  
  544. /* We give fast paths for the really cool registers */
  545. #define NEEDS_FORCE_WAKE(dev_priv, reg) \
  546.         ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
  547.          ((reg) < 0x40000) &&            \
  548.          ((reg) != FORCEWAKE))
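/* On parts with force wake this evaluates true for any register below
 * 0x40000 except FORCEWAKE itself, i.e. the GT range that must not be
 * touched while the GT is asleep; FORCEWAKE is excluded because writing
 * it is what requests the wake-up. */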
  549.  
  550. static bool IS_DISPLAYREG(u32 reg)
  551. {
  552.         /*
  553.          * This should make it easier to transition modules over to the
  554.          * new register block scheme, since we can do it incrementally.
  555.          */
  556.         if (reg >= VLV_DISPLAY_BASE)
  557.                 return false;
  558.  
  559.         if (reg >= RENDER_RING_BASE &&
  560.             reg < RENDER_RING_BASE + 0xff)
  561.                 return false;
  562.         if (reg >= GEN6_BSD_RING_BASE &&
  563.             reg < GEN6_BSD_RING_BASE + 0xff)
  564.                 return false;
  565.         if (reg >= BLT_RING_BASE &&
  566.             reg < BLT_RING_BASE + 0xff)
  567.                 return false;
  568.  
  569.         if (reg == PGTBL_ER)
  570.                 return false;
  571.  
  572.         if (reg >= IPEIR_I965 &&
  573.             reg < HWSTAM)
  574.                 return false;
  575.  
  576.         if (reg == MI_MODE)
  577.                 return false;
  578.  
  579.         if (reg == GFX_MODE_GEN7)
  580.                 return false;
  581.  
  582.         if (reg == RENDER_HWS_PGA_GEN7 ||
  583.             reg == BSD_HWS_PGA_GEN7 ||
  584.             reg == BLT_HWS_PGA_GEN7)
  585.                 return false;
  586.  
  587.         if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
  588.             reg == GEN6_BSD_RNCID)
  589.                 return false;
  590.  
  591.         if (reg == GEN6_BLITTER_ECOSKPD)
  592.                 return false;
  593.  
  594.         if (reg >= 0x4000c &&
  595.             reg <= 0x4002c)
  596.                 return false;
  597.  
  598.         if (reg >= 0x4f000 &&
  599.             reg <= 0x4f08f)
  600.                 return false;
  601.  
  602.         if (reg >= 0x4f100 &&
  603.             reg <= 0x4f11f)
  604.                 return false;
  605.  
  606.         if (reg >= VLV_MASTER_IER &&
  607.             reg <= GEN6_PMIER)
  608.                 return false;
  609.  
  610.         if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
  611.             reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
  612.                 return false;
  613.  
  614.         if (reg >= VLV_IIR_RW &&
  615.             reg <= VLV_ISR)
  616.                 return false;
  617.  
  618.         if (reg == FORCEWAKE_VLV ||
  619.             reg == FORCEWAKE_ACK_VLV)
  620.                 return false;
  621.  
  622.         if (reg == GEN6_GDRST)
  623.                 return false;
  624.  
  625.         switch (reg) {
  626.         case _3D_CHICKEN3:
  627.         case IVB_CHICKEN3:
  628.         case GEN7_COMMON_SLICE_CHICKEN1:
  629.         case GEN7_L3CNTLREG1:
  630.         case GEN7_L3_CHICKEN_MODE_REGISTER:
  631.         case GEN7_ROW_CHICKEN2:
  632.         case GEN7_L3SQCREG4:
  633.         case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
  634.         case GEN7_HALF_SLICE_CHICKEN1:
  635.         case GEN6_MBCTL:
  636.         case GEN6_UCGCTL2:
  637.                 return false;
  638.         default:
  639.                 break;
  640.         }
  641.  
  642.         return true;
  643. }
  644.  
  645. static void
  646. ilk_dummy_write(struct drm_i915_private *dev_priv)
  647. {
  648.         /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
  649.          * chip from rc6 before touching it for real. MI_MODE is masked, hence
  650.          * harmless to write 0 into. */
  651.         I915_WRITE_NOTRACE(MI_MODE, 0);
  652. }
  653.  
  654. #define __i915_read(x, y) \
  655. u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
  656.         u##x val = 0; \
  657.         if (IS_GEN5(dev_priv->dev)) \
  658.                 ilk_dummy_write(dev_priv); \
  659.         if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
  660.                 unsigned long irqflags; \
  661.                 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
  662.                 if (dev_priv->forcewake_count == 0) \
  663.                         dev_priv->gt.force_wake_get(dev_priv); \
  664.                 val = read##y(dev_priv->regs + reg); \
  665.                 if (dev_priv->forcewake_count == 0) \
  666.                         dev_priv->gt.force_wake_put(dev_priv); \
  667.                 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
  668.         } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
  669.                 val = read##y(dev_priv->regs + reg + 0x180000);         \
  670.         } else { \
  671.                 val = read##y(dev_priv->regs + reg); \
  672.         } \
  673.         return val; \
  674. }
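/* Each __i915_read(x, y) expansion below defines a function such as
 *
 *   u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg);
 *
 * which reads the register with read##y, taking a temporary force-wake
 * reference when NEEDS_FORCE_WAKE() says the register is in the GT range,
 * and redirecting Valleyview display registers to the +0x180000 block. */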
  675.  
  676. __i915_read(8, b)
  677. __i915_read(16, w)
  678. __i915_read(32, l)
  679. __i915_read(64, q)
  680. #undef __i915_read
  681.  
  682. #define __i915_write(x, y) \
  683. void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
  684.         u32 __fifo_ret = 0; \
  685.         trace_i915_reg_rw(true, reg, val, sizeof(val)); \
  686.         if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
  687.                 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
  688.         } \
  689.         if (IS_GEN5(dev_priv->dev)) \
  690.                 ilk_dummy_write(dev_priv); \
  691.         if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
  692.                 DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
  693.                 I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
  694.         } \
  695.         if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
  696.                 write##y(val, dev_priv->regs + reg + 0x180000);         \
  697.         } else {                                                        \
  698.                 write##y(val, dev_priv->regs + reg); \
  699.         }                                                               \
  700.         if (unlikely(__fifo_ret)) { \
  701.                 gen6_gt_check_fifodbg(dev_priv); \
  702.         } \
  703.         if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
  704.                 DRM_ERROR("Unclaimed write to %x\n", reg); \
  705.                 writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT);  \
  706.         } \
  707. }
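/* The write side mirrors the read side, with two extra safeguards: it waits
 * for free GT FIFO space before a force-wake write (checking for drops
 * afterwards), and on Haswell it reports unclaimed MMIO writes via
 * GEN7_ERR_INT both before and after the access. */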
  708. __i915_write(8, b)
  709. __i915_write(16, w)
  710. __i915_write(32, l)
  711. __i915_write(64, q)
  712. #undef __i915_write
  713.