38,6 → 38,7 |
#include <linux/mod_devicetable.h> |
#include <errno-base.h> |
#include <linux/pci.h> |
#include <drm/i915_pciids.h> |
|
#include <drm/drm_crtc_helper.h> |
|
45,125 → 46,32 |
|
#define __read_mostly |
|
int init_display_kms(struct drm_device *dev); |
static struct drm_driver driver; |
|
static int i915_modeset __read_mostly = 1; |
module_param_named(modeset, i915_modeset, int, 0400); |
MODULE_PARM_DESC(modeset, |
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " |
"1=on, -1=force vga console preference [default])"); |
#define GEN_DEFAULT_PIPEOFFSETS \ |
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ |
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ |
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ |
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ |
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } |
|
unsigned int i915_fbpercrtc __always_unused = 0; |
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); |
#define GEN_CHV_PIPEOFFSETS \ |
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ |
CHV_PIPE_C_OFFSET }, \ |
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ |
CHV_TRANSCODER_C_OFFSET, }, \ |
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ |
CHV_PALETTE_C_OFFSET } |
|
int i915_panel_ignore_lid __read_mostly = 1; |
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); |
MODULE_PARM_DESC(panel_ignore_lid, |
"Override lid status (0=autodetect, 1=autodetect disabled [default], " |
"-1=force lid closed, -2=force lid open)"); |
#define CURSOR_OFFSETS \ |
.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } |
|
unsigned int i915_powersave __read_mostly = 1; |
module_param_named(powersave, i915_powersave, int, 0600); |
MODULE_PARM_DESC(powersave, |
"Enable powersavings, fbc, downclocking, etc. (default: true)"); |
#define IVB_CURSOR_OFFSETS \ |
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } |
|
int i915_semaphores __read_mostly = -1; |
module_param_named(semaphores, i915_semaphores, int, 0400); |
MODULE_PARM_DESC(semaphores, |
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); |
int init_display_kms(struct drm_device *dev); |
|
int i915_enable_rc6 __read_mostly = -1; |
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400); |
MODULE_PARM_DESC(i915_enable_rc6, |
"Enable power-saving render C-state 6. " |
"Different stages can be selected via bitmask values " |
"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " |
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " |
"default: -1 (use per-chip default)"); |
|
int i915_enable_fbc __read_mostly = -1; |
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); |
MODULE_PARM_DESC(i915_enable_fbc, |
"Enable frame buffer compression for power savings " |
"(default: -1 (use per-chip default))"); |
|
unsigned int i915_lvds_downclock __read_mostly = 0; |
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
MODULE_PARM_DESC(lvds_downclock, |
"Use panel (LVDS/eDP) downclocking for power savings " |
"(default: false)"); |
|
int i915_lvds_channel_mode __read_mostly; |
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600); |
MODULE_PARM_DESC(lvds_channel_mode, |
"Specify LVDS channel mode " |
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); |
|
int i915_panel_use_ssc __read_mostly = -1; |
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); |
MODULE_PARM_DESC(lvds_use_ssc, |
"Use Spread Spectrum Clock with panels [LVDS/eDP] " |
"(default: auto from VBT)"); |
|
int i915_vbt_sdvo_panel_type __read_mostly = -1; |
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); |
MODULE_PARM_DESC(vbt_sdvo_panel_type, |
"Override/Ignore selection of SDVO panel mode in the VBT " |
"(-2=ignore, -1=auto [default], index in VBT BIOS table)"); |
|
static bool i915_try_reset __read_mostly = true; |
module_param_named(reset, i915_try_reset, bool, 0600); |
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); |
|
bool i915_enable_hangcheck __read_mostly = false; |
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); |
MODULE_PARM_DESC(enable_hangcheck, |
"Periodically check GPU activity for detecting hangs. " |
"WARNING: Disabling this can cause system wide hangs. " |
"(default: true)"); |
|
int i915_enable_ppgtt __read_mostly = -1; |
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400); |
MODULE_PARM_DESC(i915_enable_ppgtt, |
"Enable PPGTT (default: true)"); |
|
int i915_enable_psr __read_mostly = 0; |
module_param_named(enable_psr, i915_enable_psr, int, 0600); |
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); |
|
unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT); |
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); |
MODULE_PARM_DESC(preliminary_hw_support, |
"Enable preliminary hardware support."); |
|
int i915_disable_power_well __read_mostly = 1; |
module_param_named(disable_power_well, i915_disable_power_well, int, 0600); |
MODULE_PARM_DESC(disable_power_well, |
"Disable the power well when possible (default: true)"); |
|
int i915_enable_ips __read_mostly = 1; |
module_param_named(enable_ips, i915_enable_ips, int, 0600); |
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); |
|
bool i915_fastboot __read_mostly = 0; |
module_param_named(fastboot, i915_fastboot, bool, 0600); |
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time " |
"(default: false)"); |
|
int i915_enable_pc8 __read_mostly = 0; |
module_param_named(enable_pc8, i915_enable_pc8, int, 0600); |
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)"); |
|
int i915_pc8_timeout __read_mostly = 5000; |
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600); |
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)"); |
|
bool i915_prefault_disable __read_mostly; |
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600); |
MODULE_PARM_DESC(prefault_disable, |
"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only."); |
|
static struct drm_driver driver; |
extern int intel_agp_enabled; |
|
#define PCI_VENDOR_ID_INTEL 0x8086 |
173,6 → 81,8 |
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, |
.has_overlay = 1, .overlay_needs_physical = 1, |
.ring_mask = RENDER_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
static const struct intel_device_info intel_i915gm_info = { |
.gen = 3, .is_mobile = 1, .num_pipes = 2, |
181,11 → 91,15 |
.supports_tv = 1, |
.has_fbc = 1, |
.ring_mask = RENDER_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
static const struct intel_device_info intel_i945g_info = { |
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, |
.has_overlay = 1, .overlay_needs_physical = 1, |
.ring_mask = RENDER_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
static const struct intel_device_info intel_i945gm_info = { |
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, |
194,6 → 108,8 |
.supports_tv = 1, |
.has_fbc = 1, |
.ring_mask = RENDER_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_i965g_info = { |
201,6 → 117,8 |
.has_hotplug = 1, |
.has_overlay = 1, |
.ring_mask = RENDER_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_i965gm_info = { |
209,6 → 127,8 |
.has_overlay = 1, |
.supports_tv = 1, |
.ring_mask = RENDER_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_g33_info = { |
216,6 → 136,8 |
.need_gfx_hws = 1, .has_hotplug = 1, |
.has_overlay = 1, |
.ring_mask = RENDER_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_g45_info = { |
222,6 → 144,8 |
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, |
.has_pipe_cxsr = 1, .has_hotplug = 1, |
.ring_mask = RENDER_RING | BSD_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_gm45_info = { |
230,6 → 154,8 |
.has_pipe_cxsr = 1, .has_hotplug = 1, |
.supports_tv = 1, |
.ring_mask = RENDER_RING | BSD_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_pineview_info = { |
236,6 → 162,8 |
.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, |
.need_gfx_hws = 1, .has_hotplug = 1, |
.has_overlay = 1, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_ironlake_d_info = { |
242,6 → 170,8 |
.gen = 5, .num_pipes = 2, |
.need_gfx_hws = 1, .has_hotplug = 1, |
.ring_mask = RENDER_RING | BSD_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_ironlake_m_info = { |
249,6 → 179,8 |
.need_gfx_hws = 1, .has_hotplug = 1, |
.has_fbc = 1, |
.ring_mask = RENDER_RING | BSD_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_sandybridge_d_info = { |
257,6 → 189,8 |
.has_fbc = 1, |
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, |
.has_llc = 1, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_sandybridge_m_info = { |
265,6 → 199,8 |
.has_fbc = 1, |
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, |
.has_llc = 1, |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
#define GEN7_FEATURES \ |
277,6 → 213,8 |
static const struct intel_device_info intel_ivybridge_d_info = { |
GEN7_FEATURES, |
.is_ivybridge = 1, |
GEN_DEFAULT_PIPEOFFSETS, |
IVB_CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_ivybridge_m_info = { |
283,6 → 221,8 |
GEN7_FEATURES, |
.is_ivybridge = 1, |
.is_mobile = 1, |
GEN_DEFAULT_PIPEOFFSETS, |
IVB_CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_ivybridge_q_info = { |
289,6 → 229,8 |
GEN7_FEATURES, |
.is_ivybridge = 1, |
.num_pipes = 0, /* legal, last one wins */ |
GEN_DEFAULT_PIPEOFFSETS, |
IVB_CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_valleyview_m_info = { |
299,6 → 241,8 |
.display_mmio_offset = VLV_DISPLAY_BASE, |
.has_fbc = 0, /* legal, last one wins */ |
.has_llc = 0, /* legal, last one wins */ |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_valleyview_d_info = { |
308,6 → 252,8 |
.display_mmio_offset = VLV_DISPLAY_BASE, |
.has_fbc = 0, /* legal, last one wins */ |
.has_llc = 0, /* legal, last one wins */ |
GEN_DEFAULT_PIPEOFFSETS, |
CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_haswell_d_info = { |
316,6 → 262,8 |
.has_ddi = 1, |
.has_fpga_dbg = 1, |
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
IVB_CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_haswell_m_info = { |
325,6 → 273,8 |
.has_ddi = 1, |
.has_fpga_dbg = 1, |
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
GEN_DEFAULT_PIPEOFFSETS, |
IVB_CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_broadwell_d_info = { |
333,6 → 283,10 |
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
.has_llc = 1, |
.has_ddi = 1, |
.has_fpga_dbg = 1, |
.has_fbc = 1, |
GEN_DEFAULT_PIPEOFFSETS, |
IVB_CURSOR_OFFSETS, |
}; |
|
static const struct intel_device_info intel_broadwell_m_info = { |
341,8 → 295,47 |
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
.has_llc = 1, |
.has_ddi = 1, |
.has_fpga_dbg = 1, |
.has_fbc = 1, |
GEN_DEFAULT_PIPEOFFSETS, |
IVB_CURSOR_OFFSETS, |
}; |
|
/*
 * Broadwell GT3, desktop variant: gen8, three display pipes.
 * GT3 exposes a second BSD (video) engine, hence BSD2_RING in the mask.
 */
static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};
|
/*
 * Broadwell GT3, mobile variant: identical to the desktop GT3 entry
 * except for .is_mobile. Second BSD engine present (BSD2_RING).
 */
static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};
|
/*
 * Cherryview: gen8 engines on a ValleyView-style platform, hence
 * .is_valleyview and the VLV display MMIO base. Pipe C lives at
 * CHV-specific offsets, so this entry uses GEN_CHV_PIPEOFFSETS
 * instead of GEN_DEFAULT_PIPEOFFSETS.
 */
static const struct intel_device_info intel_cherryview_info = {
	.is_preliminary = 1,	/* still gated behind preliminary_hw_support */
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
|
/* |
* Make sure any device matches here are from most specific to most |
* general. For example, since the Quanta match is based on the subsystem |
371,8 → 364,11 |
INTEL_HSW_M_IDS(&intel_haswell_m_info), \ |
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ |
INTEL_VLV_D_IDS(&intel_valleyview_d_info), \ |
INTEL_BDW_M_IDS(&intel_broadwell_m_info), \ |
INTEL_BDW_D_IDS(&intel_broadwell_d_info) |
INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \ |
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \ |
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ |
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ |
INTEL_CHV_IDS(&intel_cherryview_info) |
|
static const struct pci_device_id pciidlist[] = { /* aka */ |
INTEL_PCI_IDS, |
388,7 → 384,7 |
void intel_detect_pch(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct pci_dev *pch; |
struct pci_dev *pch = NULL; |
|
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting |
* (which really amounts to a PCH but no South Display). |
409,12 → 405,9 |
* all the ISA bridge devices and check for the first match, instead |
* of only checking the first one. |
*/ |
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); |
while (pch) { |
struct pci_dev *curr = pch; |
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { |
if (pch->vendor == PCI_VENDOR_ID_INTEL) { |
unsigned short id; |
id = pch->device & INTEL_PCH_DEVICE_ID_MASK; |
unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; |
dev_priv->pch_id = id; |
|
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { |
446,17 → 439,16 |
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); |
WARN_ON(!IS_HASWELL(dev)); |
WARN_ON(!IS_ULT(dev)); |
} else { |
goto check_next; |
} |
} else |
continue; |
|
break; |
} |
check_next: |
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr); |
// pci_dev_put(curr); |
} |
if (!pch) |
DRM_DEBUG_KMS("No PCH found?\n"); |
DRM_DEBUG_KMS("No PCH found.\n"); |
|
// pci_dev_put(pch); |
} |
|
bool i915_semaphore_is_enabled(struct drm_device *dev) |
464,15 → 456,13 |
if (INTEL_INFO(dev)->gen < 6) |
return false; |
|
if (i915.semaphores >= 0) |
return i915.semaphores; |
|
/* Until we get further testing... */ |
if (IS_GEN8(dev)) { |
WARN_ON(!i915_preliminary_hw_support); |
if (IS_GEN8(dev)) |
return false; |
} |
|
if (i915_semaphores >= 0) |
return i915_semaphores; |
|
#ifdef CONFIG_INTEL_IOMMU |
/* Enable semaphores on SNB when IO remapping is off */ |
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) |
483,13 → 473,27 |
} |
|
#if 0 |
/*
 * Invoke the ->suspend hook of every encoder that provides one, holding
 * the modeset locks across the whole walk of the encoder list.
 */
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_encoder *encoder;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		/* Not every encoder implements a suspend hook. */
		if (intel_encoder->suspend)
			intel_encoder->suspend(intel_encoder);
	}
	drm_modeset_unlock_all(dev);
}
|
static int i915_drm_freeze(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_crtc *crtc; |
pci_power_t opregion_target_state; |
|
intel_runtime_pm_get(dev_priv); |
|
/* ignore lid events during suspend */ |
mutex_lock(&dev_priv->modeset_restore_lock); |
dev_priv->modeset_restore = MODESET_SUSPENDED; |
497,8 → 501,7 |
|
/* We do a lot of poking in a lot of registers, make sure they work |
* properly. */ |
hsw_disable_package_c8(dev_priv); |
intel_display_set_init_power(dev, true); |
intel_display_set_init_power(dev_priv, true); |
|
drm_kms_helper_poll_disable(dev); |
|
515,19 → 518,24 |
return error; |
} |
|
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); |
|
drm_irq_uninstall(dev); |
dev_priv->enable_hotplug_processing = false; |
/* |
* Disable CRTCs directly since we want to preserve sw state |
* for _thaw. |
* for _thaw. Also, power gate the CRTC power wells. |
*/ |
mutex_lock(&dev->mode_config.mutex); |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
dev_priv->display.crtc_disable(crtc); |
mutex_unlock(&dev->mode_config.mutex); |
drm_modeset_lock_all(dev); |
for_each_crtc(dev, crtc) |
intel_crtc_control(crtc, false); |
drm_modeset_unlock_all(dev); |
|
intel_dp_mst_suspend(dev); |
|
flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
|
intel_runtime_pm_disable_interrupts(dev); |
intel_suspend_encoders(dev_priv); |
|
intel_suspend_gt_powersave(dev); |
|
intel_modeset_suspend_hw(dev); |
} |
|
535,6 → 543,14 |
|
i915_save_state(dev); |
|
opregion_target_state = PCI_D3cold; |
#if IS_ENABLED(CONFIG_ACPI_SLEEP) |
if (acpi_target_system_state() < ACPI_STATE_S3) |
opregion_target_state = PCI_D1; |
#endif |
intel_opregion_notify_adapter(dev, opregion_target_state); |
|
intel_uncore_forcewake_reset(dev, false); |
intel_opregion_fini(dev); |
|
console_lock(); |
541,6 → 557,10 |
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); |
console_unlock(); |
|
dev_priv->suspend_count++; |
|
intel_display_set_init_power(dev_priv, false); |
|
return 0; |
} |
|
586,33 → 606,24 |
console_unlock(); |
} |
|
static void intel_resume_hotplug(struct drm_device *dev) |
static int i915_drm_thaw_early(struct drm_device *dev) |
{ |
struct drm_mode_config *mode_config = &dev->mode_config; |
struct intel_encoder *encoder; |
struct drm_i915_private *dev_priv = dev->dev_private; |
|
mutex_lock(&mode_config->mutex); |
DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
hsw_disable_pc8(dev_priv); |
|
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
if (encoder->hot_plug) |
encoder->hot_plug(encoder); |
intel_uncore_early_sanitize(dev, true); |
intel_uncore_sanitize(dev); |
intel_power_domains_init_hw(dev_priv); |
|
mutex_unlock(&mode_config->mutex); |
|
/* Just fire off a uevent and let userspace tell us what to do */ |
drm_helper_hpd_irq_event(dev); |
return 0; |
} |
|
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int error = 0; |
|
intel_uncore_early_sanitize(dev); |
|
intel_uncore_sanitize(dev); |
|
if (drm_core_check_feature(dev, DRIVER_MODESET) && |
restore_gtt_mappings) { |
mutex_lock(&dev->struct_mutex); |
620,8 → 631,6 |
mutex_unlock(&dev->struct_mutex); |
} |
|
intel_power_domains_init_hw(dev); |
|
i915_restore_state(dev); |
intel_opregion_setup(dev); |
|
628,19 → 637,29 |
/* KMS EnterVT equivalent */ |
if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
intel_init_pch_refclk(dev); |
drm_mode_config_reset(dev); |
|
mutex_lock(&dev->struct_mutex); |
|
error = i915_gem_init_hw(dev); |
if (i915_gem_init_hw(dev)) { |
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); |
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); |
} |
mutex_unlock(&dev->struct_mutex); |
|
/* We need working interrupts for modeset enabling ... */ |
drm_irq_install(dev); |
intel_runtime_pm_restore_interrupts(dev); |
|
intel_modeset_init_hw(dev); |
|
{ |
unsigned long irqflags; |
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
if (dev_priv->display.hpd_irq_setup) |
dev_priv->display.hpd_irq_setup(dev); |
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
} |
|
intel_dp_mst_resume(dev); |
drm_modeset_lock_all(dev); |
drm_mode_config_reset(dev); |
intel_modeset_setup_hw_state(dev, true); |
drm_modeset_unlock_all(dev); |
|
651,9 → 670,8 |
* notifications. |
* */ |
intel_hpd_init(dev); |
dev_priv->enable_hotplug_processing = true; |
/* Config may have changed between suspend and resume */ |
intel_resume_hotplug(dev); |
drm_helper_hpd_irq_event(dev); |
} |
|
intel_opregion_init(dev); |
670,16 → 688,13 |
schedule_work(&dev_priv->console_resume_work); |
} |
|
/* Undo what we did at i915_drm_freeze so the refcount goes back to the |
* expected level. */ |
hsw_enable_package_c8(dev_priv); |
|
mutex_lock(&dev_priv->modeset_restore_lock); |
dev_priv->modeset_restore = MODESET_DONE; |
mutex_unlock(&dev_priv->modeset_restore_lock); |
|
intel_runtime_pm_put(dev_priv); |
return error; |
intel_opregion_notify_adapter(dev, PCI_D0); |
|
return 0; |
} |
|
static int i915_drm_thaw(struct drm_device *dev) |
690,19 → 705,33 |
return __i915_drm_thaw(dev, true); |
} |
|
int i915_resume(struct drm_device *dev) |
static int i915_resume_early(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
return 0; |
|
/* |
* We have a resume ordering issue with the snd-hda driver also |
* requiring our device to be power up. Due to the lack of a |
* parent/child relationship we currently solve this with an early |
* resume hook. |
* |
* FIXME: This should be solved with a special hdmi sink device or |
* similar so that power domains can be employed. |
*/ |
if (pci_enable_device(dev->pdev)) |
return -EIO; |
|
pci_set_master(dev->pdev); |
|
return i915_drm_thaw_early(dev); |
} |
|
int i915_resume(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
|
/* |
* Platforms with opregion should have sane BIOS, older ones (gen3 and |
* earlier) need to restore the GTT mappings since the BIOS might clear |
716,6 → 745,14 |
return 0; |
} |
|
/*
 * Legacy (non-dev_pm_ops) resume entry point: run the early and the full
 * resume phases back to back.
 *
 * The previous version discarded both return values and always reported
 * success; propagate the first failure instead so callers can see a
 * failed resume.
 */
static int i915_resume_legacy(struct drm_device *dev)
{
	int ret;

	ret = i915_resume_early(dev);
	if (ret)
		return ret;

	return i915_resume(dev);
}
|
/** |
* i915_reset - reset chip after a hang |
* @dev: drm device to reset |
733,11 → 770,11 |
*/ |
int i915_reset(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_i915_private *dev_priv = dev->dev_private; |
bool simulated; |
int ret; |
|
if (!i915_try_reset) |
if (!i915.reset) |
return 0; |
|
mutex_lock(&dev->struct_mutex); |
790,8 → 827,21 |
return ret; |
} |
|
drm_irq_uninstall(dev); |
drm_irq_install(dev); |
/* |
* FIXME: This races pretty badly against concurrent holders of |
* ring interrupts. This is possible since we've started to drop |
* dev->struct_mutex in select places when waiting for the gpu. |
*/ |
|
/* |
* rps/rc6 re-init is necessary to restore state lost after the |
* reset and the re-install of gt irqs. Skip for ironlake per |
* previous concerns that it doesn't respond well to some forms |
* of re-init after reset. |
*/ |
if (INTEL_INFO(dev)->gen > 5) |
intel_reset_gt_powersave(dev); |
|
intel_hpd_init(dev); |
} else { |
mutex_unlock(&dev->struct_mutex); |
805,7 → 855,7 |
struct intel_device_info *intel_info = |
(struct intel_device_info *) ent->driver_data; |
|
if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) { |
if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { |
DRM_INFO("This hardware requires preliminary hardware support.\n" |
"See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); |
return -ENODEV; |
819,17 → 869,7 |
if (PCI_FUNC(pdev->devfn)) |
return -ENODEV; |
|
/* We've managed to ship a kms-enabled ddx that shipped with an XvMC |
* implementation for gen3 (and only gen3) that used legacy drm maps |
* (gasp!) to share buffers between X and the client. Hence we need to |
* keep around the fake agp stuff for gen3, even when kms is enabled. */ |
if (intel_info->gen != 3) { |
driver.driver_features &= |
~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP); |
} else if (!intel_agp_enabled) { |
DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); |
return -ENODEV; |
} |
driver.driver_features &= ~(DRIVER_USE_AGP); |
|
return drm_get_pci_dev(pdev, ent, &driver); |
} |
846,7 → 886,6 |
{ |
struct pci_dev *pdev = to_pci_dev(dev); |
struct drm_device *drm_dev = pci_get_drvdata(pdev); |
int error; |
|
if (!drm_dev || !drm_dev->dev_private) { |
dev_err(dev, "DRM not initialized, aborting suspend.\n"); |
856,10 → 895,30 |
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
return 0; |
|
error = i915_drm_freeze(drm_dev); |
if (error) |
return error; |
return i915_drm_freeze(drm_dev); |
} |
|
static int i915_pm_suspend_late(struct device *dev) |
{ |
struct pci_dev *pdev = to_pci_dev(dev); |
struct drm_device *drm_dev = pci_get_drvdata(pdev); |
struct drm_i915_private *dev_priv = drm_dev->dev_private; |
|
/* |
	 * We have a suspend ordering issue with the snd-hda driver also
* requiring our device to be power up. Due to the lack of a |
	 * parent/child relationship we currently solve this with a late
* suspend hook. |
* |
* FIXME: This should be solved with a special hdmi sink device or |
* similar so that power domains can be employed. |
*/ |
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
return 0; |
|
if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev)) |
hsw_enable_pc8(dev_priv); |
|
pci_disable_device(pdev); |
pci_set_power_state(pdev, PCI_D3hot); |
|
866,6 → 925,14 |
return 0; |
} |
|
/*
 * PM callback glue: translate the generic struct device into our DRM
 * device and hand off to the early resume path.
 */
static int i915_pm_resume_early(struct device *dev)
{
	return i915_resume_early(pci_get_drvdata(to_pci_dev(dev)));
}
|
static int i915_pm_resume(struct device *dev) |
{ |
struct pci_dev *pdev = to_pci_dev(dev); |
887,6 → 954,14 |
return i915_drm_freeze(drm_dev); |
} |
|
/*
 * PM callback glue: translate the generic struct device into our DRM
 * device and hand off to the early thaw path.
 */
static int i915_pm_thaw_early(struct device *dev)
{
	return i915_drm_thaw_early(pci_get_drvdata(to_pci_dev(dev)));
}
|
static int i915_pm_thaw(struct device *dev) |
{ |
struct pci_dev *pdev = to_pci_dev(dev); |
903,9 → 978,550 |
return i915_drm_freeze(drm_dev); |
} |
|
/*
 * Haswell runtime-suspend hook: enter package C8 (PC8).
 * Always reports success.
 */
static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}
|
static int snb_runtime_resume(struct drm_i915_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
|
intel_init_pch_refclk(dev); |
|
return 0; |
} |
|
/*
 * Haswell runtime-resume hook: leave package C8 (PC8).
 * Always reports success.
 */
static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
	hsw_disable_pc8(dev_priv);

	return 0;
}
|
/* |
* Save all Gunit registers that may be lost after a D3 and a subsequent |
* S0i[R123] transition. The list of registers needing a save/restore is |
* defined in the VLV2_S0IXRegs document. This documents marks all Gunit |
* registers in the following way: |
* - Driver: saved/restored by the driver |
* - Punit : saved/restored by the Punit firmware |
* - No, w/o marking: no need to save/restore, since the register is R/O or |
* used internally by the HW in a way that doesn't depend |
* keeping the content across a suspend/resume. |
* - Debug : used for debugging |
* |
* We save/restore all registers marked with 'Driver', with the following |
* exceptions: |
* - Registers out of use, including also registers marked with 'Debug'. |
* These have no effect on the driver's operation, so we don't save/restore |
* them to reduce the overhead. |
* - Registers that are fully setup by an initialization function called from |
* the resume path. For example many clock gating and RPS/RC6 registers. |
* - Registers that provide the right functionality with their reset defaults. |
* |
* TODO: Except for registers that based on the above 3 criteria can be safely |
* ignored, we save/restore all others, practically treating the HW context as |
* a black-box for the driver. Further investigation is needed to reduce the |
* saved/restored registers even further, by following the same 3 criteria. |
*/ |
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) |
{ |
struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; |
int i; |
|
/* GAM 0x4000-0x4770 */ |
s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); |
s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); |
s->arb_mode = I915_READ(ARB_MODE); |
s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); |
s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); |
|
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) |
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4); |
|
s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); |
s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); |
|
s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); |
s->ecochk = I915_READ(GAM_ECOCHK); |
s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); |
s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); |
|
s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); |
|
/* MBC 0x9024-0x91D0, 0x8500 */ |
s->g3dctl = I915_READ(VLV_G3DCTL); |
s->gsckgctl = I915_READ(VLV_GSCKGCTL); |
s->mbctl = I915_READ(GEN6_MBCTL); |
|
/* GCP 0x9400-0x9424, 0x8100-0x810C */ |
s->ucgctl1 = I915_READ(GEN6_UCGCTL1); |
s->ucgctl3 = I915_READ(GEN6_UCGCTL3); |
s->rcgctl1 = I915_READ(GEN6_RCGCTL1); |
s->rcgctl2 = I915_READ(GEN6_RCGCTL2); |
s->rstctl = I915_READ(GEN6_RSTCTL); |
s->misccpctl = I915_READ(GEN7_MISCCPCTL); |
|
/* GPM 0xA000-0xAA84, 0x8000-0x80FC */ |
s->gfxpause = I915_READ(GEN6_GFXPAUSE); |
s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); |
s->rpdeuc = I915_READ(GEN6_RPDEUC); |
s->ecobus = I915_READ(ECOBUS); |
s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); |
s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); |
s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); |
s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); |
s->rcedata = I915_READ(VLV_RCEDATA); |
s->spare2gh = I915_READ(VLV_SPAREG2H); |
|
/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ |
s->gt_imr = I915_READ(GTIMR); |
s->gt_ier = I915_READ(GTIER); |
s->pm_imr = I915_READ(GEN6_PMIMR); |
s->pm_ier = I915_READ(GEN6_PMIER); |
|
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) |
s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4); |
|
/* GT SA CZ domain, 0x100000-0x138124 */ |
s->tilectl = I915_READ(TILECTL); |
s->gt_fifoctl = I915_READ(GTFIFOCTL); |
s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); |
s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
s->pmwgicz = I915_READ(VLV_PMWGICZ); |
|
/* Gunit-Display CZ domain, 0x182028-0x1821CF */ |
s->gu_ctl0 = I915_READ(VLV_GU_CTL0); |
s->gu_ctl1 = I915_READ(VLV_GU_CTL1); |
s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); |
|
/* |
* Not saving any of: |
* DFT, 0x9800-0x9EC0 |
* SARB, 0xB000-0xB1FC |
* GAC, 0x5208-0x524C, 0x14000-0x14C000 |
* PCI CFG |
*/ |
} |
|
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) |
{ |
struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; |
u32 val; |
int i; |
|
/* GAM 0x4000-0x4770 */ |
I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); |
I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); |
I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); |
I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); |
I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); |
|
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) |
I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]); |
|
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); |
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count); |
|
I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); |
I915_WRITE(GAM_ECOCHK, s->ecochk); |
I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); |
I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); |
|
I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); |
|
/* MBC 0x9024-0x91D0, 0x8500 */ |
I915_WRITE(VLV_G3DCTL, s->g3dctl); |
I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); |
I915_WRITE(GEN6_MBCTL, s->mbctl); |
|
/* GCP 0x9400-0x9424, 0x8100-0x810C */ |
I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); |
I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); |
I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); |
I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); |
I915_WRITE(GEN6_RSTCTL, s->rstctl); |
I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); |
|
/* GPM 0xA000-0xAA84, 0x8000-0x80FC */ |
I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); |
I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); |
I915_WRITE(GEN6_RPDEUC, s->rpdeuc); |
I915_WRITE(ECOBUS, s->ecobus); |
I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); |
I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout); |
I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); |
I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); |
I915_WRITE(VLV_RCEDATA, s->rcedata); |
I915_WRITE(VLV_SPAREG2H, s->spare2gh); |
|
/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ |
I915_WRITE(GTIMR, s->gt_imr); |
I915_WRITE(GTIER, s->gt_ier); |
I915_WRITE(GEN6_PMIMR, s->pm_imr); |
I915_WRITE(GEN6_PMIER, s->pm_ier); |
|
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) |
I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]); |
|
/* GT SA CZ domain, 0x100000-0x138124 */ |
I915_WRITE(TILECTL, s->tilectl); |
I915_WRITE(GTFIFOCTL, s->gt_fifoctl); |
/* |
* Preserve the GT allow wake and GFX force clock bit, they are not |
* be restored, as they are used to control the s0ix suspend/resume |
* sequence by the caller. |
*/ |
val = I915_READ(VLV_GTLC_WAKE_CTRL); |
val &= VLV_GTLC_ALLOWWAKEREQ; |
val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; |
I915_WRITE(VLV_GTLC_WAKE_CTRL, val); |
|
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
val &= VLV_GFX_CLK_FORCE_ON_BIT; |
val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; |
I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); |
|
I915_WRITE(VLV_PMWGICZ, s->pmwgicz); |
|
/* Gunit-Display CZ domain, 0x182028-0x1821CF */ |
I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); |
I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); |
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
} |
#endif |
|
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 surv;
	int ret;

	/*
	 * Force the GFX clock on or off through the Gunit survivability
	 * register. Callers are expected to toggle the state, so warn if the
	 * hardware already matches the requested state.
	 */
	surv = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	WARN_ON(!!(surv & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);

#define GFX_CLK_ON \
	(I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
	/* Before forcing on, let any previous force-off settle first. */
	if (force_on) {
		ret = wait_for(!GFX_CLK_ON, 20);
		if (ret) {
			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
			return ret;
		}
	}

	surv = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	if (force_on)
		surv |= VLV_GFX_CLK_FORCE_ON_BIT;
	else
		surv &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, surv);

	/* Force-off completes asynchronously; nothing more to wait for. */
	if (!force_on)
		return 0;

	ret = wait_for(GFX_CLK_ON, 20);
	if (ret)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return ret;
#undef GFX_CLK_ON
}
#if 0 |
/*
 * Enable or disable GT wake requests via the ALLOWWAKEREQ bit and wait for
 * the hardware to acknowledge the new setting through ALLOWWAKEACK.
 *
 * Returns 0 on success, or the wait_for() error code on timeout.
 */
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	/*
	 * Fix: the message used to claim "disabling" unconditionally, which
	 * was misleading when allow == true.
	 */
	if (err)
		DRM_ERROR("timeout %sabling GT waking\n",
			  allow ? "en" : "dis");
	return err;
#undef COND
}
|
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, |
bool wait_for_on) |
{ |
u32 mask; |
u32 val; |
int err; |
|
mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; |
val = wait_for_on ? mask : 0; |
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) |
if (COND) |
return 0; |
|
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", |
wait_for_on ? "on" : "off", |
I915_READ(VLV_GTLC_PW_STATUS)); |
|
/* |
* RC6 transitioning can be delayed up to 2 msec (see |
* valleyview_enable_rps), use 3 msec for safety. |
*/ |
err = wait_for(COND, 3); |
if (err) |
DRM_ERROR("timeout waiting for GT wells to go %s\n", |
wait_for_on ? "on" : "off"); |
|
return err; |
#undef COND |
} |
|
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) |
{ |
if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) |
return; |
|
DRM_ERROR("GT register access while GT waking disabled\n"); |
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); |
} |
|
/*
 * ValleyView runtime-suspend sequence: with the GFX clock forced on, disable
 * GT wake requests, save the Gunit state that is lost in S0ix, then release
 * the forced clock. The step order is hardware-mandated; on failure the
 * error paths undo the steps in reverse.
 *
 * Returns 0 on success or a negative error code from the failed step.
 */
static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	/* Both context-exists bits are expected to be set at this point. */
	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	/* Gunit registers can only be saved with the GFX clock forced on. */
	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;
	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
|
static int vlv_runtime_resume(struct drm_i915_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
int err; |
int ret; |
|
/* |
* If any of the steps fail just try to continue, that's the best we |
* can do at this point. Return the first error code (which will also |
* leave RPM permanently disabled). |
*/ |
ret = vlv_force_gfx_clock(dev_priv, true); |
|
vlv_restore_gunit_s0ix_state(dev_priv); |
|
err = vlv_allow_gt_wake(dev_priv, true); |
if (!ret) |
ret = err; |
|
err = vlv_force_gfx_clock(dev_priv, false); |
if (!ret) |
ret = err; |
|
vlv_check_no_gt_access(dev_priv); |
|
intel_init_clock_gating(dev); |
i915_gem_restore_fences(dev); |
|
return ret; |
} |
|
static int intel_runtime_suspend(struct device *device) |
{ |
struct pci_dev *pdev = to_pci_dev(device); |
struct drm_device *dev = pci_get_drvdata(pdev); |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
|
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) |
return -ENODEV; |
|
WARN_ON(!HAS_RUNTIME_PM(dev)); |
assert_force_wake_inactive(dev_priv); |
|
DRM_DEBUG_KMS("Suspending device\n"); |
|
/* |
* We could deadlock here in case another thread holding struct_mutex |
* calls RPM suspend concurrently, since the RPM suspend will wait |
* first for this RPM suspend to finish. In this case the concurrent |
* RPM resume will be followed by its RPM suspend counterpart. Still |
* for consistency return -EAGAIN, which will reschedule this suspend. |
*/ |
if (!mutex_trylock(&dev->struct_mutex)) { |
DRM_DEBUG_KMS("device lock contention, deffering suspend\n"); |
/* |
* Bump the expiration timestamp, otherwise the suspend won't |
* be rescheduled. |
*/ |
pm_runtime_mark_last_busy(device); |
|
return -EAGAIN; |
} |
/* |
* We are safe here against re-faults, since the fault handler takes |
* an RPM reference. |
*/ |
i915_gem_release_all_mmaps(dev_priv); |
mutex_unlock(&dev->struct_mutex); |
|
/* |
* rps.work can't be rearmed here, since we get here only after making |
* sure the GPU is idle and the RPS freq is set to the minimum. See |
* intel_mark_idle(). |
*/ |
cancel_work_sync(&dev_priv->rps.work); |
intel_runtime_pm_disable_interrupts(dev); |
|
if (IS_GEN6(dev)) { |
ret = 0; |
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
ret = hsw_runtime_suspend(dev_priv); |
} else if (IS_VALLEYVIEW(dev)) { |
ret = vlv_runtime_suspend(dev_priv); |
} else { |
ret = -ENODEV; |
WARN_ON(1); |
} |
|
if (ret) { |
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); |
intel_runtime_pm_restore_interrupts(dev); |
|
return ret; |
} |
|
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
dev_priv->pm.suspended = true; |
|
/* |
* current versions of firmware which depend on this opregion |
* notification have repurposed the D1 definition to mean |
* "runtime suspended" vs. what you would normally expect (D3) |
* to distinguish it from notifications that might be sent |
* via the suspend path. |
*/ |
intel_opregion_notify_adapter(dev, PCI_D1); |
|
DRM_DEBUG_KMS("Device suspended\n"); |
return 0; |
} |
|
static int intel_runtime_resume(struct device *device) |
{ |
struct pci_dev *pdev = to_pci_dev(device); |
struct drm_device *dev = pci_get_drvdata(pdev); |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
|
WARN_ON(!HAS_RUNTIME_PM(dev)); |
|
DRM_DEBUG_KMS("Resuming device\n"); |
|
intel_opregion_notify_adapter(dev, PCI_D0); |
dev_priv->pm.suspended = false; |
|
if (IS_GEN6(dev)) { |
ret = snb_runtime_resume(dev_priv); |
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
ret = hsw_runtime_resume(dev_priv); |
} else if (IS_VALLEYVIEW(dev)) { |
ret = vlv_runtime_resume(dev_priv); |
} else { |
WARN_ON(1); |
ret = -ENODEV; |
} |
|
/* |
* No point of rolling back things in case of an error, as the best |
* we can do is to hope that things will still work (and disable RPM). |
*/ |
i915_gem_init_swizzling(dev); |
gen6_update_ring_freq(dev); |
|
intel_runtime_pm_restore_interrupts(dev); |
intel_reset_gt_powersave(dev); |
|
if (ret) |
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); |
else |
DRM_DEBUG_KMS("Device resumed\n"); |
|
return ret; |
} |
|
/*
 * Power-management dispatch table hooking the driver into the kernel PM
 * core: system suspend/resume, hibernation (freeze/thaw/poweroff/restore)
 * and runtime PM. Note that restore_early/restore reuse the resume
 * callbacks, and poweroff reuses no suspend_late counterpart here.
 */
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
|
/*
 * vm_operations for mmapped GEM objects: page faults are handled by the
 * driver (i915_gem_fault), while open/close refcounting is delegated to the
 * generic DRM GEM helpers.
 */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
|
/*
 * File operations for the DRM device node. Most entries delegate to the
 * generic DRM helpers; only the 32-bit compat ioctl handler is
 * driver-specific (and only built when CONFIG_COMPAT is set).
 */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	/* no seeking on the DRM node */
	.llseek = noop_llseek,
};
#endif |
|
static struct drm_driver driver = { |
/* Don't use MTRRs here; the Xserver or userspace app should |
* deal with them for Intel hardware. |