#include "intel_drv.h"
#include "i915_drv.h"
|
static void intel_increase_pllclock(struct drm_device *dev, |
enum pipe pipe) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int dpll_reg = DPLL(pipe); |
int dpll; |
|
if (!HAS_GMCH_DISPLAY(dev)) |
return; |
|
if (!dev_priv->lvds_downclock_avail) |
return; |
|
dpll = I915_READ(dpll_reg); |
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
|
assert_panel_unlocked(dev_priv, pipe); |
|
dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
I915_WRITE(dpll_reg, dpll); |
intel_wait_for_vblank(dev, pipe); |
|
dpll = I915_READ(dpll_reg); |
if (dpll & DISPLAY_RATE_SELECT_FPA1) |
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
} |
} |
|
/** |
* intel_mark_fb_busy - mark given planes as busy |
* @dev: DRM device |
* @frontbuffer_bits: bits for the affected planes |
* @ring: optional ring for asynchronous commands |
* |
* This function gets called every time the screen contents change. It can be |
* used to keep e.g. the update rate at the nominal refresh rate with DRRS. |
*/ |
static void intel_mark_fb_busy(struct drm_device *dev, |
unsigned frontbuffer_bits, |
struct intel_engine_cs *ring) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum pipe pipe; |
|
if (!i915.powersave) |
return; |
|
for_each_pipe(dev_priv, pipe) { |
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) |
continue; |
|
intel_increase_pllclock(dev, pipe); |
if (ring && intel_fbc_enabled(dev)) |
ring->fbc_dirty = true; |
} |
} |
|
/** |
* intel_fb_obj_invalidate - invalidate frontbuffer object |
* @obj: GEM object to invalidate |
* @ring: set for asynchronous rendering |
* @origin: which operation caused the invalidation |
* |
* This function gets called every time rendering on the given object starts and |
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must |
* be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed |
* be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed |
* until the rendering completes or a flip on this frontbuffer plane is |
* scheduled. |
*/ |
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, |
struct intel_engine_cs *ring) |
enum fb_op_origin origin) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_private *dev_priv = to_i915(dev); |
|
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
|
145,7 → 87,7 |
if (!obj->frontbuffer_bits) |
return; |
|
if (ring) { |
if (origin == ORIGIN_CS) { |
mutex_lock(&dev_priv->fb_tracking.lock); |
dev_priv->fb_tracking.busy_bits |
|= obj->frontbuffer_bits; |
154,9 → 96,9 |
mutex_unlock(&dev_priv->fb_tracking.lock); |
} |
|
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); |
|
intel_psr_invalidate(dev, obj->frontbuffer_bits); |
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits); |
intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin); |
} |
|
/** |
163,6 → 105,7 |
* intel_frontbuffer_flush - flush frontbuffer |
* @dev: DRM device |
* @frontbuffer_bits: frontbuffer plane tracking bits |
* @origin: which operation caused the flush |
* |
* This function gets called every time rendering on the given planes has |
* completed and frontbuffer caching can be started again. Flushes will get |
170,10 → 113,11 |
* |
* Can be called without any locks held. |
*/ |
void intel_frontbuffer_flush(struct drm_device *dev, |
unsigned frontbuffer_bits) |
static void intel_frontbuffer_flush(struct drm_device *dev, |
unsigned frontbuffer_bits, |
enum fb_op_origin origin) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_private *dev_priv = to_i915(dev); |
|
/* Delay flushing when rings are still busy.*/ |
mutex_lock(&dev_priv->fb_tracking.lock); |
180,25 → 124,19 |
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits; |
mutex_unlock(&dev_priv->fb_tracking.lock); |
|
intel_mark_fb_busy(dev, frontbuffer_bits, NULL); |
if (!frontbuffer_bits) |
return; |
|
intel_psr_flush(dev, frontbuffer_bits); |
|
/* |
* FIXME: Unconditional fbc flushing here is a rather gross hack and |
* needs to be reworked into a proper frontbuffer tracking scheme like |
* psr employs. |
*/ |
if (dev_priv->fbc.need_sw_cache_clean) { |
dev_priv->fbc.need_sw_cache_clean = false; |
bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN); |
intel_edp_drrs_flush(dev, frontbuffer_bits); |
intel_psr_flush(dev, frontbuffer_bits, origin); |
intel_fbc_flush(dev_priv, frontbuffer_bits, origin); |
} |
} |
|
/** |
* intel_fb_obj_flush - flush frontbuffer object |
* @obj: GEM object to flush |
* @retire: set when retiring asynchronous rendering |
* @origin: which operation caused the flush |
* |
* This function gets called every time rendering on the given object has |
* completed and frontbuffer caching can be started again. If @retire is true |
205,10 → 143,10 |
* then any delayed flushes will be unblocked. |
*/ |
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, |
bool retire) |
bool retire, enum fb_op_origin origin) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_private *dev_priv = to_i915(dev); |
unsigned frontbuffer_bits; |
|
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
227,7 → 165,7 |
mutex_unlock(&dev_priv->fb_tracking.lock); |
} |
|
intel_frontbuffer_flush(dev, frontbuffer_bits); |
intel_frontbuffer_flush(dev, frontbuffer_bits, origin); |
} |
|
/** |
245,7 → 183,7 |
void intel_frontbuffer_flip_prepare(struct drm_device *dev, |
unsigned frontbuffer_bits) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_private *dev_priv = to_i915(dev); |
|
mutex_lock(&dev_priv->fb_tracking.lock); |
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits; |
252,6 → 190,8 |
/* Remove stale busy bits due to the old buffer. */ |
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; |
mutex_unlock(&dev_priv->fb_tracking.lock); |
|
intel_psr_single_frame_update(dev, frontbuffer_bits); |
} |
|
/** |
267,7 → 207,7 |
void intel_frontbuffer_flip_complete(struct drm_device *dev, |
unsigned frontbuffer_bits) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_private *dev_priv = to_i915(dev); |
|
mutex_lock(&dev_priv->fb_tracking.lock); |
/* Mask any cancelled flips. */ |
275,5 → 215,29 |
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; |
mutex_unlock(&dev_priv->fb_tracking.lock); |
|
intel_frontbuffer_flush(dev, frontbuffer_bits); |
intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP); |
} |
|
/**
 * intel_frontbuffer_flip - synchronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on @obj. This is for
 * synchronous plane updates which will happen on the next vblank and which will
 * not get delayed by pending gpu rendering.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip(struct drm_device *dev,
			    unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_lock(&dev_priv->fb_tracking.lock);
	/* Remove stale busy bits due to the old buffer. */
	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

	/* Synchronous flip: nothing outstanding to wait for, flush now. */
	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}