49,25 → 49,88 |
* present for a given platform. |
*/ |
|
#define GEN9_ENABLE_DC5(dev) 0 |
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev) |
|
#define for_each_power_well(i, power_well, domain_mask, power_domains) \ |
for (i = 0; \ |
i < (power_domains)->power_well_count && \ |
((power_well) = &(power_domains)->power_wells[i]); \ |
i++) \ |
if ((power_well)->domains & (domain_mask)) |
for_each_if ((power_well)->domains & (domain_mask)) |
|
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \ |
for (i = (power_domains)->power_well_count - 1; \ |
i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\ |
i--) \ |
if ((power_well)->domains & (domain_mask)) |
for_each_if ((power_well)->domains & (domain_mask)) |
|
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, |
int power_well_id); |
|
/*
 * intel_display_power_domain_str - map a display power domain enum value
 * to a human-readable name for debug and warning messages.
 *
 * Unknown values trip MISSING_CASE() and fall back to "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
|
static void intel_power_well_enable(struct drm_i915_private *dev_priv, |
struct i915_power_well *power_well) |
{ |
244,13 → 307,7 |
gen8_irq_power_well_post_enable(dev_priv, |
1 << PIPE_C | 1 << PIPE_B); |
} |
|
if (power_well->data == SKL_DISP_PW_1) { |
if (!dev_priv->power_domains.initializing) |
intel_prepare_ddi(dev); |
gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); |
} |
} |
|
static void hsw_set_power_well(struct drm_i915_private *dev_priv, |
struct i915_power_well *power_well, bool enable) |
292,13 → 349,10 |
BIT(POWER_DOMAIN_TRANSCODER_C) | \ |
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ |
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \ |
BIT(POWER_DOMAIN_AUX_B) | \ |
BIT(POWER_DOMAIN_AUX_C) | \ |
BIT(POWER_DOMAIN_AUX_D) | \ |
305,45 → 359,28 |
BIT(POWER_DOMAIN_AUDIO) | \ |
BIT(POWER_DOMAIN_VGA) | \ |
BIT(POWER_DOMAIN_INIT)) |
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \ |
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ |
BIT(POWER_DOMAIN_PLLS) | \ |
BIT(POWER_DOMAIN_PIPE_A) | \ |
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ |
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ |
BIT(POWER_DOMAIN_AUX_A) | \ |
BIT(POWER_DOMAIN_INIT)) |
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \ |
BIT(POWER_DOMAIN_INIT)) |
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
BIT(POWER_DOMAIN_INIT)) |
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
BIT(POWER_DOMAIN_INIT)) |
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ |
BIT(POWER_DOMAIN_INIT)) |
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \ |
SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ |
BIT(POWER_DOMAIN_PLLS) | \ |
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ |
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ |
BIT(POWER_DOMAIN_MODESET) | \ |
BIT(POWER_DOMAIN_AUX_A) | \ |
BIT(POWER_DOMAIN_INIT)) |
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \ |
(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ |
(POWER_DOMAIN_MASK & ~( \ |
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ |
SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \ |
SKL_DISPLAY_DDI_B_POWER_DOMAINS | \ |
SKL_DISPLAY_DDI_C_POWER_DOMAINS | \ |
SKL_DISPLAY_DDI_D_POWER_DOMAINS | \ |
SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \ |
SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) | \ |
BIT(POWER_DOMAIN_INIT)) |
|
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ |
354,10 → 391,8 |
BIT(POWER_DOMAIN_TRANSCODER_C) | \ |
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ |
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
BIT(POWER_DOMAIN_AUX_B) | \ |
BIT(POWER_DOMAIN_AUX_C) | \ |
BIT(POWER_DOMAIN_AUDIO) | \ |
369,11 → 404,15 |
BIT(POWER_DOMAIN_PIPE_A) | \ |
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ |
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ |
BIT(POWER_DOMAIN_AUX_A) | \ |
BIT(POWER_DOMAIN_PLLS) | \ |
BIT(POWER_DOMAIN_INIT)) |
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ |
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ |
BIT(POWER_DOMAIN_MODESET) | \ |
BIT(POWER_DOMAIN_AUX_A) | \ |
BIT(POWER_DOMAIN_INIT)) |
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \ |
(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ |
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \ |
417,48 → 456,121 |
*/ |
} |
|
void bxt_enable_dc9(struct drm_i915_private *dev_priv) |
static void gen9_set_dc_state_debugmask_memory_up( |
struct drm_i915_private *dev_priv) |
{ |
uint32_t val; |
|
assert_can_enable_dc9(dev_priv); |
/* The below bit doesn't need to be cleared ever afterwards */ |
val = I915_READ(DC_STATE_DEBUG); |
if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) { |
val |= DC_STATE_DEBUG_MASK_MEMORY_UP; |
I915_WRITE(DC_STATE_DEBUG, val); |
POSTING_READ(DC_STATE_DEBUG); |
} |
} |
|
DRM_DEBUG_KMS("Enabling DC9\n"); |
static void gen9_write_dc_state(struct drm_i915_private *dev_priv, |
u32 state) |
{ |
int rewrites = 0; |
int rereads = 0; |
u32 v; |
|
val = I915_READ(DC_STATE_EN); |
val |= DC_STATE_EN_DC9; |
I915_WRITE(DC_STATE_EN, val); |
POSTING_READ(DC_STATE_EN); |
I915_WRITE(DC_STATE_EN, state); |
|
/* It has been observed that disabling the dc6 state sometimes |
* doesn't stick and dmc keeps returning old value. Make sure |
* the write really sticks enough times and also force rewrite until |
* we are confident that state is exactly what we want. |
*/ |
do { |
v = I915_READ(DC_STATE_EN); |
|
if (v != state) { |
I915_WRITE(DC_STATE_EN, state); |
rewrites++; |
rereads = 0; |
} else if (rereads++ > 5) { |
break; |
} |
|
void bxt_disable_dc9(struct drm_i915_private *dev_priv) |
} while (rewrites < 100); |
|
if (v != state) |
DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n", |
state, v); |
|
/* Most of the times we need one retry, avoid spam */ |
if (rewrites > 1) |
DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n", |
state, rewrites); |
} |
|
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) |
{ |
uint32_t val; |
uint32_t mask; |
|
assert_can_disable_dc9(dev_priv); |
mask = DC_STATE_EN_UPTO_DC5; |
if (IS_BROXTON(dev_priv)) |
mask |= DC_STATE_EN_DC9; |
else |
mask |= DC_STATE_EN_UPTO_DC6; |
|
DRM_DEBUG_KMS("Disabling DC9\n"); |
WARN_ON_ONCE(state & ~mask); |
|
if (i915.enable_dc == 0) |
state = DC_STATE_DISABLE; |
else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5) |
state = DC_STATE_EN_UPTO_DC5; |
|
if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK) |
gen9_set_dc_state_debugmask_memory_up(dev_priv); |
|
val = I915_READ(DC_STATE_EN); |
val &= ~DC_STATE_EN_DC9; |
I915_WRITE(DC_STATE_EN, val); |
POSTING_READ(DC_STATE_EN); |
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", |
val & mask, state); |
|
/* Check if DMC is ignoring our DC state requests */ |
if ((val & mask) != dev_priv->csr.dc_state) |
DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n", |
dev_priv->csr.dc_state, val & mask); |
|
val &= ~mask; |
val |= state; |
|
gen9_write_dc_state(dev_priv, val); |
|
dev_priv->csr.dc_state = val & mask; |
} |
|
static void gen9_set_dc_state_debugmask_memory_up( |
struct drm_i915_private *dev_priv) |
void bxt_enable_dc9(struct drm_i915_private *dev_priv) |
{ |
uint32_t val; |
assert_can_enable_dc9(dev_priv); |
|
/* The below bit doesn't need to be cleared ever afterwards */ |
val = I915_READ(DC_STATE_DEBUG); |
if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) { |
val |= DC_STATE_DEBUG_MASK_MEMORY_UP; |
I915_WRITE(DC_STATE_DEBUG, val); |
POSTING_READ(DC_STATE_DEBUG); |
DRM_DEBUG_KMS("Enabling DC9\n"); |
|
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); |
} |
|
/*
 * bxt_disable_dc9 - exit the DC9 power saving state on Broxton.
 *
 * Sanity-checks via assert_can_disable_dc9() that leaving DC9 is
 * currently allowed, then requests DC_STATE_DISABLE (all DC states off)
 * through gen9_set_dc_state().
 */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
|
/*
 * Warn (once per condition) if the CSR/DMC firmware does not appear to be
 * programmed into the hardware: program storage, stack pointer base and
 * HTP registers must all read back non-zero before DC states may be used.
 */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
|
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
471,8 → 583,7 |
|
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), |
"DC5 already programmed to be enabled.\n"); |
WARN_ONCE(dev_priv->pm.suspended, |
"DC5 cannot be enabled, if platform is runtime-suspended.\n"); |
assert_rpm_wakelock_held(dev_priv); |
|
assert_csr_loaded(dev_priv); |
} |
479,8 → 590,6 |
|
static void assert_can_disable_dc5(struct drm_i915_private *dev_priv) |
{ |
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, |
SKL_DISP_PW_2); |
/* |
* During initialization, the firmware may not be loaded yet. |
* We still want to make sure that the DC enabling flag is cleared. |
488,42 → 597,18 |
if (dev_priv->power_domains.initializing) |
return; |
|
WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n"); |
WARN_ONCE(dev_priv->pm.suspended, |
"Disabling of DC5 while platform is runtime-suspended should never happen.\n"); |
assert_rpm_wakelock_held(dev_priv); |
} |
|
static void gen9_enable_dc5(struct drm_i915_private *dev_priv) |
{ |
uint32_t val; |
|
assert_can_enable_dc5(dev_priv); |
|
DRM_DEBUG_KMS("Enabling DC5\n"); |
|
gen9_set_dc_state_debugmask_memory_up(dev_priv); |
|
val = I915_READ(DC_STATE_EN); |
val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK; |
val |= DC_STATE_EN_UPTO_DC5; |
I915_WRITE(DC_STATE_EN, val); |
POSTING_READ(DC_STATE_EN); |
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); |
} |
|
static void gen9_disable_dc5(struct drm_i915_private *dev_priv) |
{ |
uint32_t val; |
|
assert_can_disable_dc5(dev_priv); |
|
DRM_DEBUG_KMS("Disabling DC5\n"); |
|
val = I915_READ(DC_STATE_EN); |
val &= ~DC_STATE_EN_UPTO_DC5; |
I915_WRITE(DC_STATE_EN, val); |
POSTING_READ(DC_STATE_EN); |
} |
|
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
547,40 → 632,37 |
if (dev_priv->power_domains.initializing) |
return; |
|
assert_csr_loaded(dev_priv); |
WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), |
"DC6 already programmed to be disabled.\n"); |
} |
|
static void skl_enable_dc6(struct drm_i915_private *dev_priv) |
static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv) |
{ |
uint32_t val; |
assert_can_disable_dc5(dev_priv); |
|
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1) |
assert_can_disable_dc6(dev_priv); |
|
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
} |
|
void skl_enable_dc6(struct drm_i915_private *dev_priv) |
{ |
assert_can_enable_dc6(dev_priv); |
|
DRM_DEBUG_KMS("Enabling DC6\n"); |
|
gen9_set_dc_state_debugmask_memory_up(dev_priv); |
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); |
|
val = I915_READ(DC_STATE_EN); |
val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK; |
val |= DC_STATE_EN_UPTO_DC6; |
I915_WRITE(DC_STATE_EN, val); |
POSTING_READ(DC_STATE_EN); |
} |
|
static void skl_disable_dc6(struct drm_i915_private *dev_priv) |
void skl_disable_dc6(struct drm_i915_private *dev_priv) |
{ |
uint32_t val; |
|
assert_can_disable_dc6(dev_priv); |
|
DRM_DEBUG_KMS("Disabling DC6\n"); |
|
val = I915_READ(DC_STATE_EN); |
val &= ~DC_STATE_EN_UPTO_DC6; |
I915_WRITE(DC_STATE_EN, val); |
POSTING_READ(DC_STATE_EN); |
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
} |
|
static void skl_set_power_well(struct drm_i915_private *dev_priv, |
630,21 → 712,17 |
!I915_READ(HSW_PWR_WELL_BIOS), |
"Invalid for power well status to be enabled, unless done by the BIOS, \ |
when request is to disable!\n"); |
if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) && |
power_well->data == SKL_DISP_PW_2) { |
if (SKL_ENABLE_DC6(dev)) { |
skl_disable_dc6(dev_priv); |
if (power_well->data == SKL_DISP_PW_2) { |
/* |
* DDI buffer programming unnecessary during driver-load/resume |
* as it's already done during modeset initialization then. |
* It's also invalid here as encoder list is still uninitialized. |
* DDI buffer programming unnecessary during |
* driver-load/resume as it's already done |
* during modeset initialization then. It's |
* also invalid here as encoder list is still |
* uninitialized. |
*/ |
if (!dev_priv->power_domains.initializing) |
intel_prepare_ddi(dev); |
} else { |
gen9_disable_dc5(dev_priv); |
} |
} |
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask); |
} |
|
658,36 → 736,11 |
} |
} else { |
if (enable_requested) { |
if (IS_SKYLAKE(dev) && |
(power_well->data == SKL_DISP_PW_1) && |
(intel_csr_load_status_get(dev_priv) == FW_LOADED)) |
DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n"); |
else { |
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); |
POSTING_READ(HSW_PWR_WELL_DRIVER); |
DRM_DEBUG_KMS("Disabling %s\n", power_well->name); |
} |
|
if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) && |
power_well->data == SKL_DISP_PW_2) { |
enum csr_state state; |
/* TODO: wait for a completion event or |
* similar here instead of busy |
* waiting using wait_for function. |
*/ |
wait_for((state = intel_csr_load_status_get(dev_priv)) != |
FW_UNINITIALIZED, 1000); |
if (state != FW_LOADED) |
DRM_DEBUG("CSR firmware not ready (%d)\n", |
state); |
else |
if (SKL_ENABLE_DC6(dev)) |
skl_enable_dc6(dev_priv); |
else |
gen9_enable_dc5(dev_priv); |
} |
} |
} |
|
if (check_fuse_status) { |
if (power_well->data == SKL_DISP_PW_1) { |
760,6 → 813,41 |
skl_set_power_well(dev_priv, power_well, false); |
} |
|
/*
 * The "DC off" well is considered enabled when neither DC5 nor DC6 is
 * currently allowed, i.e. the DC5/DC6 bits in DC_STATE_EN are all clear.
 */
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
|
/* Enabling the "DC off" well means forbidding DC5/DC6 entry. */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc5_dc6(dev_priv);
}
|
/*
 * Disabling the "DC off" well re-allows DC states: the deepest one
 * permitted by the platform and the i915.enable_dc module parameter
 * (DC6 on Skylake unless enable_dc is 0 or 1, otherwise DC5).
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
		skl_enable_dc6(dev_priv);
	else
		gen9_enable_dc5(dev_priv);
}
|
/*
 * Bring the hardware DC state in line with the software reference count:
 * any outstanding reference forces all DC states off; with no references
 * the deepest allowed DC state (DC6 on SKL unless i915.enable_dc is 0 or
 * 1, else DC5) is programmed.
 */
static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0) {
		gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	} else {
		if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
		    i915.enable_dc != 1)
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
		else
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
	}
}
|
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, |
struct i915_power_well *power_well) |
{ |
974,10 → 1062,12 |
int power_well_id) |
{ |
struct i915_power_domains *power_domains = &dev_priv->power_domains; |
struct i915_power_well *power_well; |
int i; |
|
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { |
for (i = 0; i < power_domains->power_well_count; i++) { |
struct i915_power_well *power_well; |
|
power_well = &power_domains->power_wells[i]; |
if (power_well->data == power_well_id) |
return power_well; |
} |
1397,6 → 1487,22 |
chv_set_pipe_power_well(dev_priv, power_well, false); |
} |
|
/*
 * Take a reference on @domain, powering up (in ascending power-well
 * order) every well that provides the domain and was previously
 * unreferenced.  Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	/* The first reference on a well triggers its actual enabling. */
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++)
			intel_power_well_enable(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;
}
|
/** |
* intel_display_power_get - grab a power domain reference |
* @dev_priv: i915 device instance |
1412,24 → 1518,53 |
void intel_display_power_get(struct drm_i915_private *dev_priv, |
enum intel_display_power_domain domain) |
{ |
struct i915_power_domains *power_domains; |
struct i915_power_well *power_well; |
int i; |
struct i915_power_domains *power_domains = &dev_priv->power_domains; |
|
intel_runtime_pm_get(dev_priv); |
|
power_domains = &dev_priv->power_domains; |
mutex_lock(&power_domains->lock); |
|
__intel_display_power_get_domain(dev_priv, domain); |
|
mutex_unlock(&power_domains->lock); |
} |
|
/** |
* intel_display_power_get_if_enabled - grab a reference for an enabled display power domain |
* @dev_priv: i915 device instance |
* @domain: power domain to reference |
* |
* This function grabs a power domain reference for @domain, but only if the
* domain (and hence every power well providing it) is already enabled. It
* returns true on success and false if the domain was disabled, in which
* case no reference is taken.
* |
* Any power domain reference obtained by this function must have a symmetric |
* call to intel_display_power_put() to release the reference again. |
*/ |
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, |
enum intel_display_power_domain domain) |
{ |
struct i915_power_domains *power_domains = &dev_priv->power_domains; |
bool is_enabled; |
|
if (!intel_runtime_pm_get_if_in_use(dev_priv)) |
return false; |
|
mutex_lock(&power_domains->lock); |
|
for_each_power_well(i, power_well, BIT(domain), power_domains) { |
if (!power_well->count++) |
intel_power_well_enable(dev_priv, power_well); |
if (__intel_display_power_is_enabled(dev_priv, domain)) { |
__intel_display_power_get_domain(dev_priv, domain); |
is_enabled = true; |
} else { |
is_enabled = false; |
} |
|
power_domains->domain_use_count[domain]++; |
mutex_unlock(&power_domains->lock); |
|
mutex_unlock(&power_domains->lock); |
if (!is_enabled) |
intel_runtime_pm_put(dev_priv); |
|
return is_enabled; |
} |
|
/** |
1452,13 → 1587,17 |
|
mutex_lock(&power_domains->lock); |
|
WARN_ON(!power_domains->domain_use_count[domain]); |
WARN(!power_domains->domain_use_count[domain], |
"Use count on domain %s is already zero\n", |
intel_display_power_domain_str(domain)); |
power_domains->domain_use_count[domain]--; |
|
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { |
WARN_ON(!power_well->count); |
WARN(!power_well->count, |
"Use count on power well %s is already zero", |
power_well->name); |
|
if (!--power_well->count && i915.disable_power_well) |
if (!--power_well->count) |
intel_power_well_disable(dev_priv, power_well); |
} |
|
1470,14 → 1609,10 |
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PIPE_A) | \ |
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ |
BIT(POWER_DOMAIN_PORT_CRT) | \ |
BIT(POWER_DOMAIN_PLLS) | \ |
BIT(POWER_DOMAIN_AUX_A) | \ |
1501,10 → 1636,8 |
#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK |
|
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
BIT(POWER_DOMAIN_PORT_CRT) | \ |
BIT(POWER_DOMAIN_AUX_B) | \ |
BIT(POWER_DOMAIN_AUX_C) | \ |
1511,39 → 1644,34 |
BIT(POWER_DOMAIN_INIT)) |
|
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
BIT(POWER_DOMAIN_AUX_B) | \ |
BIT(POWER_DOMAIN_INIT)) |
|
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
BIT(POWER_DOMAIN_AUX_B) | \ |
BIT(POWER_DOMAIN_INIT)) |
|
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
BIT(POWER_DOMAIN_AUX_C) | \ |
BIT(POWER_DOMAIN_INIT)) |
|
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
BIT(POWER_DOMAIN_AUX_C) | \ |
BIT(POWER_DOMAIN_INIT)) |
|
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
BIT(POWER_DOMAIN_AUX_B) | \ |
BIT(POWER_DOMAIN_AUX_C) | \ |
BIT(POWER_DOMAIN_INIT)) |
|
#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ |
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ |
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ |
BIT(POWER_DOMAIN_AUX_D) | \ |
BIT(POWER_DOMAIN_INIT)) |
|
1591,6 → 1719,13 |
.is_enabled = skl_power_well_enabled, |
}; |
|
/* Ops vtable for the virtual "DC off" power well (gates DC5/DC6 entry). */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};
|
static struct i915_power_well hsw_power_wells[] = { |
{ |
.name = "always-on", |
1646,6 → 1781,7 |
.always_on = 1, |
.domains = VLV_ALWAYS_ON_POWER_DOMAINS, |
.ops = &i9xx_always_on_power_well_ops, |
.data = PUNIT_POWER_WELL_ALWAYS_ON, |
}, |
{ |
.name = "display", |
1747,20 → 1883,29 |
.always_on = 1, |
.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS, |
.ops = &i9xx_always_on_power_well_ops, |
.data = SKL_DISP_PW_ALWAYS_ON, |
}, |
{ |
.name = "power well 1", |
.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS, |
/* Handled by the DMC firmware */ |
.domains = 0, |
.ops = &skl_power_well_ops, |
.data = SKL_DISP_PW_1, |
}, |
{ |
.name = "MISC IO power well", |
.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS, |
/* Handled by the DMC firmware */ |
.domains = 0, |
.ops = &skl_power_well_ops, |
.data = SKL_DISP_PW_MISC_IO, |
}, |
{ |
.name = "DC off", |
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, |
.ops = &gen9_dc_off_power_well_ops, |
.data = SKL_DISP_PW_DC_OFF, |
}, |
{ |
.name = "power well 2", |
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, |
.ops = &skl_power_well_ops, |
1792,6 → 1937,34 |
}, |
}; |
|
/*
 * skl_pw1_misc_io_init - enable power well 1 and the Misc I/O well during
 * Skylake display core init.  No-op on non-Skylake platforms.  Caller is
 * expected to hold power_domains->lock (see skl_display_core_init()).
 */
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *well;

	if (!IS_SKYLAKE(dev_priv))
		return;

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);
}
|
/*
 * skl_pw1_misc_io_fini - counterpart of skl_pw1_misc_io_init(): disable
 * power well 1 and the Misc I/O well during Skylake display core uninit.
 * No-op on non-Skylake platforms.  Caller is expected to hold
 * power_domains->lock (see skl_display_core_uninit()).
 */
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *well;

	if (!IS_SKYLAKE(dev_priv))
		return;

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);
}
|
static struct i915_power_well bxt_power_wells[] = { |
{ |
.name = "always-on", |
1806,11 → 1979,17 |
.data = SKL_DISP_PW_1, |
}, |
{ |
.name = "DC off", |
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, |
.ops = &gen9_dc_off_power_well_ops, |
.data = SKL_DISP_PW_DC_OFF, |
}, |
{ |
.name = "power well 2", |
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, |
.ops = &skl_power_well_ops, |
.data = SKL_DISP_PW_2, |
} |
}, |
}; |
|
static int |
1820,7 → 1999,7 |
if (disable_power_well >= 0) |
return !!disable_power_well; |
|
if (IS_SKYLAKE(dev_priv)) { |
if (IS_BROXTON(dev_priv)) { |
DRM_DEBUG_KMS("Disabling display power well support\n"); |
return 0; |
} |
1859,7 → 2038,7 |
set_power_wells(power_domains, hsw_power_wells); |
} else if (IS_BROADWELL(dev_priv->dev)) { |
set_power_wells(power_domains, bdw_power_wells); |
} else if (IS_SKYLAKE(dev_priv->dev)) { |
} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) { |
set_power_wells(power_domains, skl_power_wells); |
} else if (IS_BROXTON(dev_priv->dev)) { |
set_power_wells(power_domains, bxt_power_wells); |
1874,21 → 2053,6 |
return 0; |
} |
|
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
struct device *device = &dev->pdev->dev; |
|
if (!HAS_RUNTIME_PM(dev)) |
return; |
|
if (!intel_enable_rc6(dev)) |
return; |
|
/* Make sure we're not suspended first. */ |
pm_runtime_get_sync(device); |
} |
|
/** |
* intel_power_domains_fini - finalizes the power domain structures |
* @dev_priv: i915 device instance |
1899,15 → 2063,32 |
*/ |
void intel_power_domains_fini(struct drm_i915_private *dev_priv) |
{ |
intel_runtime_pm_disable(dev_priv); |
struct device *device = &dev_priv->dev->pdev->dev; |
|
/* The i915.ko module is still not prepared to be loaded when |
/* |
* The i915.ko module is still not prepared to be loaded when |
* the power well is not enabled, so just enable it in case |
* we're going to unload/reload. */ |
* we're going to unload/reload. |
* The following also reacquires the RPM reference the core passed |
* to the driver during loading, which is dropped in |
* intel_runtime_pm_enable(). We have to hand back the control of the |
* device to the core with this reference held. |
*/ |
intel_display_set_init_power(dev_priv, true); |
|
/* Remove the refcount we took to keep power well support disabled. */ |
if (!i915.disable_power_well) |
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); |
|
/* |
* Remove the refcount we took in intel_runtime_pm_enable() in case |
* the platform doesn't support runtime PM. |
*/ |
if (!HAS_RUNTIME_PM(dev_priv)) |
pm_runtime_put(device); |
} |
|
static void intel_power_domains_resume(struct drm_i915_private *dev_priv) |
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) |
{ |
struct i915_power_domains *power_domains = &dev_priv->power_domains; |
struct i915_power_well *power_well; |
1922,6 → 2103,47 |
mutex_unlock(&power_domains->lock); |
} |
|
/*
 * Bring up the Skylake display core: forbid all DC states, enable the
 * PCH reset handshake, then power up PG1 and Misc I/O.  On the resume
 * path (@resume true) additionally re-initialize cdclk and re-program
 * the DMC/CSR firmware payload if one was loaded earlier.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_init(dev_priv);
	mutex_unlock(&power_domains->lock);

	/* The remaining steps are only needed when resuming. */
	if (!resume)
		return;

	skl_init_cdclk(dev_priv);

	/* Re-program the DMC firmware if a payload was loaded. */
	if (dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
|
/*
 * Tear down the Skylake display core: forbid all DC states, uninit
 * cdclk, then power down PG1 and Misc I/O.  The PCH reset handshake flag
 * set in skl_display_core_init() is deliberately left in place.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_fini(dev_priv);
	mutex_unlock(&power_domains->lock);
}
|
static void chv_phy_control_init(struct drm_i915_private *dev_priv) |
{ |
struct i915_power_well *cmn_bc = |
2044,7 → 2266,7 |
* This function initializes the hardware power domain state and enables all |
* power domains using intel_display_set_init_power(). |
*/ |
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) |
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) |
{ |
struct drm_device *dev = dev_priv->dev; |
struct i915_power_domains *power_domains = &dev_priv->power_domains; |
2051,7 → 2273,9 |
|
power_domains->initializing = true; |
|
if (IS_CHERRYVIEW(dev)) { |
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { |
skl_display_core_init(dev_priv, resume); |
} else if (IS_CHERRYVIEW(dev)) { |
mutex_lock(&power_domains->lock); |
chv_phy_control_init(dev_priv); |
mutex_unlock(&power_domains->lock); |
2063,11 → 2287,34 |
|
/* For now, we need the power well to be always enabled. */ |
intel_display_set_init_power(dev_priv, true); |
intel_power_domains_resume(dev_priv); |
/* Disable power support if the user asked so. */ |
if (!i915.disable_power_well) |
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); |
intel_power_domains_sync_hw(dev_priv); |
power_domains->initializing = false; |
} |
|
/** |
* intel_power_domains_suspend - suspend power domain state |
* @dev_priv: i915 device instance |
* |
* This function prepares the hardware power domain state before entering |
* system suspend. It must be paired with intel_power_domains_init_hw(). |
*/ |
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/* SKL/KBL: tear down the display core state set up in init_hw. */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
}
|
/** |
* intel_runtime_pm_get - grab a runtime pm reference |
* @dev_priv: i915 device instance |
* |
2082,14 → 2329,48 |
struct drm_device *dev = dev_priv->dev; |
struct device *device = &dev->pdev->dev; |
|
if (!HAS_RUNTIME_PM(dev)) |
return; |
pm_runtime_get_sync(device); |
|
pm_runtime_get_sync(device); |
WARN(dev_priv->pm.suspended, "Device still suspended.\n"); |
atomic_inc(&dev_priv->pm.wakeref_count); |
assert_rpm_wakelock_held(dev_priv); |
} |
|
/** |
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use |
* @dev_priv: i915 device instance |
* |
* This function grabs a device-level runtime pm reference if the device is |
* already in use and ensures that it is powered up. |
* |
* Any runtime pm reference obtained by this function must have a symmetric |
* call to intel_runtime_pm_put() to release the reference again. |
*/ |
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		/* >0: got the ref; 0: device not in use; <0: RPM disabled */
		int ret = pm_runtime_get_if_in_use(device);

		/*
		 * In cases runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	/* Without CONFIG_PM the device is always "in use": always succeed. */
	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);

	return true;
}
|
/** |
* intel_runtime_pm_get_noresume - grab a runtime pm reference |
* @dev_priv: i915 device instance |
* |
2111,11 → 2392,10 |
struct drm_device *dev = dev_priv->dev; |
struct device *device = &dev->pdev->dev; |
|
if (!HAS_RUNTIME_PM(dev)) |
return; |
assert_rpm_wakelock_held(dev_priv); |
pm_runtime_get_noresume(device); |
|
WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n"); |
pm_runtime_get_noresume(device); |
atomic_inc(&dev_priv->pm.wakeref_count); |
} |
|
/** |
2131,8 → 2411,9 |
struct drm_device *dev = dev_priv->dev; |
struct device *device = &dev->pdev->dev; |
|
if (!HAS_RUNTIME_PM(dev)) |
return; |
assert_rpm_wakelock_held(dev_priv); |
if (atomic_dec_and_test(&dev_priv->pm.wakeref_count)) |
atomic_inc(&dev_priv->pm.atomic_seq); |
|
pm_runtime_mark_last_busy(device); |
pm_runtime_put_autosuspend(device); |
2153,22 → 2434,27 |
struct drm_device *dev = dev_priv->dev; |
struct device *device = &dev->pdev->dev; |
|
if (!HAS_RUNTIME_PM(dev)) |
return; |
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ |
pm_runtime_mark_last_busy(device); |
|
/* |
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a |
* requirement. |
* Take a permanent reference to disable the RPM functionality and drop |
* it only when unloading the driver. Use the low level get/put helpers, |
* so the driver's own RPM reference tracking asserts also work on |
* platforms without RPM support. |
*/ |
if (!intel_enable_rc6(dev)) { |
DRM_INFO("RC6 disabled, disabling runtime PM support\n"); |
return; |
if (!HAS_RUNTIME_PM(dev)) { |
pm_runtime_dont_use_autosuspend(device); |
pm_runtime_get_sync(device); |
} else { |
pm_runtime_use_autosuspend(device); |
} |
|
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ |
pm_runtime_mark_last_busy(device); |
pm_runtime_use_autosuspend(device); |
|
/* |
* The core calls the driver load handler with an RPM reference held. |
* We drop that here and will reacquire it during unloading in |
* intel_power_domains_fini(). |
*/ |
pm_runtime_put_autosuspend(device); |
} |
|