Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace: Rev 7143 → Rev 7144

/drivers/video/drm/i915/intel_fbc.c
43,7 → 43,7
 
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
return dev_priv->fbc.activate != NULL;
return HAS_FBC(dev_priv);
}
 
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
56,6 → 56,11
return INTEL_INFO(dev_priv)->gen < 4;
}
 
/* True on platforms (gen3 and older) where FBC must be kept off whenever
 * more than one pipe is active; checked by multiple_pipes_ok(). */
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen <= 3;
}
 
/*
* In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
74,19 → 79,17
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
*/
static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
int *width, int *height)
{
struct intel_plane_state *plane_state =
to_intel_plane_state(crtc->base.primary->state);
int w, h;
 
if (intel_rotation_90_or_270(plane_state->base.rotation)) {
w = drm_rect_height(&plane_state->src) >> 16;
h = drm_rect_width(&plane_state->src) >> 16;
if (intel_rotation_90_or_270(cache->plane.rotation)) {
w = cache->plane.src_h;
h = cache->plane.src_w;
} else {
w = drm_rect_width(&plane_state->src) >> 16;
h = drm_rect_height(&plane_state->src) >> 16;
w = cache->plane.src_w;
h = cache->plane.src_h;
}
 
if (width)
95,18 → 98,17
*height = h;
}
 
static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
struct drm_framebuffer *fb)
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
struct intel_fbc_state_cache *cache)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
int lines;
 
intel_fbc_get_plane_source_size(crtc, NULL, &lines);
intel_fbc_get_plane_source_size(cache, NULL, &lines);
if (INTEL_INFO(dev_priv)->gen >= 7)
lines = min(lines, 2048);
 
/* Hardware needs the full buffer stride, not just the active area. */
return lines * fb->pitches[0];
return lines * cache->fb.stride;
}
 
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
113,8 → 115,6
{
u32 fbc_ctl;
 
dev_priv->fbc.active = false;
 
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
130,21 → 130,17
}
}
 
static void i8xx_fbc_activate(struct intel_crtc *crtc)
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
int cfb_pitch;
int i;
u32 fbc_ctl;
 
dev_priv->fbc.active = true;
 
/* Note: fbc.threshold == 1 for i8xx */
cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
cfb_pitch = params->cfb_size / FBC_LL_SIZE;
if (params->fb.stride < cfb_pitch)
cfb_pitch = params->fb.stride;
 
/* FBC_CTL wants 32B or 64B units */
if (IS_GEN2(dev_priv))
161,9 → 157,9
 
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
}
 
/* enable it... */
173,7 → 169,7
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
fbc_ctl |= params->fb.fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
 
182,23 → 178,19
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
 
static void g4x_fbc_activate(struct intel_crtc *crtc)
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
 
dev_priv->fbc.active = true;
 
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
 
I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
 
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
208,8 → 200,6
{
u32 dpfc_ctl;
 
dev_priv->fbc.active = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
230,19 → 220,14
POSTING_READ(MSG_FBC_REND_STATE);
}
 
static void ilk_fbc_activate(struct intel_crtc *crtc)
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
unsigned int y_offset;
 
dev_priv->fbc.active = true;
 
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
threshold++;
 
switch (threshold) {
259,18 → 244,17
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
dpfc_ctl |= obj->fence_reg;
dpfc_ctl |= params->fb.fence_reg;
 
y_offset = get_crtc_fence_y_offset(crtc);
I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
}
 
intel_fbc_recompress(dev_priv);
280,8 → 264,6
{
u32 dpfc_ctl;
 
dev_priv->fbc.active = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
295,21 → 277,17
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void gen7_fbc_activate(struct intel_crtc *crtc)
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
 
dev_priv->fbc.active = true;
 
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
 
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
threshold++;
 
switch (threshold) {
337,8 → 315,8
ILK_FBCQ_DIS);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
HSW_FBCQ_DIS);
}
 
345,12 → 323,52
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
 
intel_fbc_recompress(dev_priv);
}
 
/* Read back the platform-specific FBC enable bit from the hardware. */
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 5)
		return ilk_fbc_is_active(dev_priv);

	if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);

	return i8xx_fbc_is_active(dev_priv);
}
 
/* Program the FBC hardware through the per-platform activate routine,
 * flagging the software state active first so intel_fbc_is_active()
 * reports true as soon as programming has been kicked off. */
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;

	if (INTEL_INFO(dev_priv)->gen >= 7) {
		gen7_fbc_activate(dev_priv);
		return;
	}

	if (INTEL_INFO(dev_priv)->gen >= 5) {
		ilk_fbc_activate(dev_priv);
		return;
	}

	if (IS_GM45(dev_priv)) {
		g4x_fbc_activate(dev_priv);
		return;
	}

	i8xx_fbc_activate(dev_priv);
}
 
/* Disable FBC in hardware via the per-platform deactivate routine, after
 * clearing the software active flag. */
static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = false;

	if (INTEL_INFO(dev_priv)->gen >= 5) {
		ilk_fbc_deactivate(dev_priv);
		return;
	}

	if (IS_GM45(dev_priv)) {
		g4x_fbc_deactivate(dev_priv);
		return;
	}

	i8xx_fbc_deactivate(dev_priv);
}
 
/**
* intel_fbc_is_active - Is FBC active?
* @dev_priv: i915 device instance
364,25 → 382,25
return dev_priv->fbc.active;
}
 
static void intel_fbc_activate(const struct drm_framebuffer *fb)
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct drm_i915_private *dev_priv = fb->dev->dev_private;
struct intel_crtc *crtc = dev_priv->fbc.crtc;
struct drm_i915_private *dev_priv =
container_of(__work, struct drm_i915_private, fbc.work.work);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
struct intel_crtc *crtc = fbc->crtc;
struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
 
dev_priv->fbc.activate(crtc);
if (drm_crtc_vblank_get(&crtc->base)) {
DRM_ERROR("vblank not available for FBC on pipe %c\n",
pipe_name(crtc->pipe));
 
dev_priv->fbc.fb_id = fb->base.id;
dev_priv->fbc.y = crtc->base.y;
mutex_lock(&fbc->lock);
work->scheduled = false;
mutex_unlock(&fbc->lock);
return;
}
 
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct drm_i915_private *dev_priv =
container_of(__work, struct drm_i915_private, fbc.work.work);
struct intel_fbc_work *work = &dev_priv->fbc.work;
struct intel_crtc *crtc = dev_priv->fbc.crtc;
int delay_ms = 50;
 
retry:
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
390,16 → 408,18
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
* It is also worth mentioning that since work->scheduled_vblank can be
* updated multiple times by the other threads, hitting the timeout is
* not an error condition. We'll just end up hitting the "goto retry"
* case below.
*/
wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
wait_event_timeout(vblank->queue,
drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
msecs_to_jiffies(50));
 
mutex_lock(&dev_priv->fbc.lock);
mutex_lock(&fbc->lock);
 
/* Were we cancelled? */
if (!work->scheduled)
406,128 → 426,81
goto out;
 
/* Were we delayed again while this function was sleeping? */
if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
jiffies)) {
mutex_unlock(&dev_priv->fbc.lock);
if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
mutex_unlock(&fbc->lock);
goto retry;
}
 
if (crtc->base.primary->fb == work->fb)
intel_fbc_activate(work->fb);
intel_fbc_hw_activate(dev_priv);
 
work->scheduled = false;
 
out:
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
drm_crtc_vblank_put(&crtc->base);
}
 
static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
dev_priv->fbc.work.scheduled = false;
}
 
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc_work *work = &dev_priv->fbc.work;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
WARN_ON(!mutex_is_locked(&fbc->lock));
 
/* It is useless to call intel_fbc_cancel_work() in this function since
* we're not releasing fbc.lock, so it won't have an opportunity to grab
* it to discover that it was cancelled. So we just update the expected
* jiffy count. */
work->fb = crtc->base.primary->fb;
if (drm_crtc_vblank_get(&crtc->base)) {
DRM_ERROR("vblank not available for FBC on pipe %c\n",
pipe_name(crtc->pipe));
return;
}
 
/* It is useless to call intel_fbc_cancel_work() or cancel_work() in
* this function since we're not releasing fbc.lock, so it won't have an
* opportunity to grab it to discover that it was cancelled. So we just
* update the expected jiffy count. */
work->scheduled = true;
work->enable_jiffies = jiffies;
work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
drm_crtc_vblank_put(&crtc->base);
 
schedule_work(&work->work);
}
 
static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
struct intel_fbc *fbc = &dev_priv->fbc;
 
intel_fbc_cancel_work(dev_priv);
WARN_ON(!mutex_is_locked(&fbc->lock));
 
if (dev_priv->fbc.active)
dev_priv->fbc.deactivate(dev_priv);
}
/* Calling cancel_work() here won't help due to the fact that the work
* function grabs fbc->lock. Just set scheduled to false so the work
* function can know it was cancelled. */
fbc->work.scheduled = false;
 
/*
* intel_fbc_deactivate - deactivate FBC if it's associated with crtc
* @crtc: the CRTC
*
* This function deactivates FBC if it's associated with the provided CRTC.
*/
void intel_fbc_deactivate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.crtc == crtc)
__intel_fbc_deactivate(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
}
 
static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
const char *reason)
static bool multiple_pipes_ok(struct intel_crtc *crtc)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return;
 
dev_priv->fbc.no_fbc_reason = reason;
DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
}
 
static bool crtc_can_fbc(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_plane *primary = crtc->base.primary;
struct intel_fbc *fbc = &dev_priv->fbc;
enum pipe pipe = crtc->pipe;
 
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
return false;
 
if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
return false;
 
/* Don't even bother tracking anything we don't need. */
if (!no_fbc_on_multiple_pipes(dev_priv))
return true;
}
 
static bool crtc_is_valid(struct intel_crtc *crtc)
{
if (!intel_crtc_active(&crtc->base))
return false;
WARN_ON(!drm_modeset_is_locked(&primary->mutex));
 
if (!to_intel_plane_state(crtc->base.primary->state)->visible)
return false;
if (to_intel_plane_state(primary->state)->visible)
fbc->visible_pipes_mask |= (1 << pipe);
else
fbc->visible_pipes_mask &= ~(1 << pipe);
 
return true;
return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
}
 
/* Returns true when fewer than two pipes have an active, visible primary
 * plane. Only meaningful on gen4 and older; newer gens always pass. */
static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;
	int n_pipes = 0;
	struct drm_crtc *crtc;

	/* Newer hardware is not restricted by the multi-pipe rule. */
	if (INTEL_INFO(dev_priv)->gen > 4)
		return true;

	for_each_pipe(dev_priv, pipe) {
		crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		/* Count only pipes that are active and actually showing
		 * their primary plane. */
		if (intel_crtc_active(crtc) &&
		    to_intel_plane_state(crtc->primary->state)->visible)
			n_pipes++;
	}

	return (n_pipes < 2);
}
 
static int find_compression_threshold(struct drm_i915_private *dev_priv,
struct drm_mm_node *node,
int size,
581,16 → 554,16
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->state->fb;
struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_mm_node *uninitialized_var(compressed_llb);
int size, fb_cpp, ret;
 
WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
 
size = intel_fbc_calculate_cfb_size(crtc, fb);
fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);
 
ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
599,12 → 572,12
 
}
 
dev_priv->fbc.threshold = ret;
fbc->threshold = ret;
 
if (INTEL_INFO(dev_priv)->gen >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
615,23 → 588,22
if (ret)
goto err_fb;
 
dev_priv->fbc.compressed_llb = compressed_llb;
fbc->compressed_llb = compressed_llb;
 
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
dev_priv->mm.stolen_base + fbc->compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
 
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
dev_priv->fbc.compressed_fb.size,
dev_priv->fbc.threshold);
fbc->compressed_fb.size, fbc->threshold);
 
return 0;
 
err_fb:
kfree(compressed_llb);
i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
639,25 → 611,27
 
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
i915_gem_stolen_remove_node(dev_priv,
&dev_priv->fbc.compressed_fb);
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (dev_priv->fbc.compressed_llb) {
i915_gem_stolen_remove_node(dev_priv,
dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
if (drm_mm_node_allocated(&fbc->compressed_fb))
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
 
if (fbc->compressed_llb) {
i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
kfree(fbc->compressed_llb);
}
}
 
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
mutex_lock(&fbc->lock);
__intel_fbc_cleanup_cfb(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
}
 
static bool stride_is_valid(struct drm_i915_private *dev_priv,
681,12 → 655,10
return true;
}
 
static bool pixel_format_is_valid(struct drm_framebuffer *fb)
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
uint32_t pixel_format)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
switch (fb->pixel_format) {
switch (pixel_format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
return true;
693,7 → 665,7
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
if (IS_GEN2(dev))
if (IS_GEN2(dev_priv))
return false;
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
713,6 → 685,7
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
 
if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
726,7 → 699,8
max_h = 1536;
}
 
intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
&effective_h);
effective_w += crtc->adjusted_x;
effective_h += crtc->adjusted_y;
 
733,80 → 707,97
return effective_w <= max_w && effective_h <= max_h;
}
 
/**
* __intel_fbc_update - activate/deactivate FBC as needed, unlocked
* @crtc: the CRTC that triggered the update
*
* This function completely reevaluates the status of FBC, then activates,
* deactivates or maintains it on the same state.
*/
static void __intel_fbc_update(struct intel_crtc *crtc)
static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
to_intel_plane_state(crtc->base.primary->state);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
 
if (!multiple_pipes_ok(dev_priv)) {
set_no_fbc_reason(dev_priv, "more than one pipe active");
goto out_disable;
}
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cache->crtc.hsw_bdw_pixel_rate =
ilk_pipe_pixel_rate(crtc_state);
 
if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
cache->plane.rotation = plane_state->base.rotation;
cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
cache->plane.visible = plane_state->visible;
 
if (!cache->plane.visible)
return;
 
if (!crtc_is_valid(crtc)) {
set_no_fbc_reason(dev_priv, "no output");
goto out_disable;
obj = intel_fb_obj(fb);
 
/* FIXME: We lack the proper locking here, so only run this on the
* platforms that need. */
if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
cache->fb.fence_reg = obj->fence_reg;
cache->fb.tiling_mode = obj->tiling_mode;
}
 
fb = crtc->base.primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &crtc->config->base.adjusted_mode;
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
set_no_fbc_reason(dev_priv, "incompatible mode");
goto out_disable;
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
}
 
if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
(cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
fbc->no_fbc_reason = "incompatible mode";
return false;
}
 
if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
set_no_fbc_reason(dev_priv, "mode too large for compression");
goto out_disable;
fbc->no_fbc_reason = "mode too large for compression";
return false;
}
 
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
goto out_disable;
if (cache->fb.tiling_mode != I915_TILING_X ||
cache->fb.fence_reg == I915_FENCE_REG_NONE) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
set_no_fbc_reason(dev_priv, "rotation unsupported");
goto out_disable;
cache->plane.rotation != BIT(DRM_ROTATE_0)) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
}
 
if (!stride_is_valid(dev_priv, fb->pitches[0])) {
set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
goto out_disable;
if (!stride_is_valid(dev_priv, cache->fb.stride)) {
fbc->no_fbc_reason = "framebuffer stride not supported";
return false;
}
 
if (!pixel_format_is_valid(fb)) {
set_no_fbc_reason(dev_priv, "pixel format is invalid");
goto out_disable;
if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
fbc->no_fbc_reason = "pixel format is invalid";
return false;
}
 
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
ilk_pipe_pixel_rate(crtc->config) >=
dev_priv->cdclk_freq * 95 / 100) {
set_no_fbc_reason(dev_priv, "pixel rate is too big");
goto out_disable;
cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
fbc->no_fbc_reason = "pixel rate is too big";
return false;
}
 
/* It is possible for the required CFB size change without a
819,189 → 810,321
* we didn't get any invalidate/deactivate calls, but this would require
* a lot of tracking just for a specific case. If we conclude it's an
* important case, we can implement it later. */
if (intel_fbc_calculate_cfb_size(crtc, fb) >
dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
set_no_fbc_reason(dev_priv, "CFB requirements changed");
goto out_disable;
if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
fbc->compressed_fb.size * fbc->threshold) {
fbc->no_fbc_reason = "CFB requirements changed";
return false;
}
 
return true;
}
 
/* Decide whether @crtc is even eligible to be chosen as the FBC CRTC,
 * recording the first failing reason in fbc->no_fbc_reason. The checks are
 * ordered, so the reported reason is the earliest one that fails. */
static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	/* Only Broadwell gets FBC when the module parameter is left at its
	 * "per-chip default" setting. */
	bool enable_by_default = IS_BROADWELL(dev_priv);

	if (intel_vgpu_active(dev_priv->dev)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	/* i915.enable_fbc < 0 means "use the chip default". */
	if (i915.enable_fbc < 0 && !enable_by_default) {
		fbc->no_fbc_reason = "disabled per chip default";
		return false;
	}

	if (!i915.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param";
		return false;
	}

	/* Some platforms restrict FBC to pipe A / plane A only. */
	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
		fbc->no_fbc_reason = "no enabled pipes can have FBC";
		return false;
	}

	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
		fbc->no_fbc_reason = "no enabled planes can have FBC";
		return false;
	}

	return true;
}
 
/* Snapshot into @params everything needed to program the FBC registers for
 * @crtc, derived from the state cache and the CRTC itself. */
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->crtc.pipe = crtc->pipe;
	params->crtc.plane = crtc->plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);

	params->fb.pixel_format = cache->fb.pixel_format;
	params->fb.stride = cache->fb.stride;
	params->fb.fence_reg = cache->fb.fence_reg;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	/* NOTE(review): ilk_ggtt_offset is only filled in for gen5/6 by
	 * intel_fbc_update_state_cache() — confirm it is unused elsewhere. */
	params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
}
 
/* Compare two register-parameter snapshots for byte equality. Safe because
 * intel_fbc_get_reg_params() memset()s the struct, so padding bytes are
 * zero and memcmp() is reliable. */
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
				       struct intel_fbc_reg_params *params2)
{
	return !memcmp(params1, params2, sizeof(*params1));
}
 
/**
 * intel_fbc_pre_update - deactivate FBC before a plane update
 * @crtc: the CRTC being updated
 *
 * Refreshes the FBC state cache for @crtc and deactivates FBC ahead of the
 * update; it can be re-activated later via intel_fbc_post_update(). Takes
 * fbc->lock. multiple_pipes_ok() also refreshes the visible-pipes tracking,
 * so it is called even when FBC is not enabled on this CRTC.
 */
void intel_fbc_pre_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (!multiple_pipes_ok(crtc)) {
		fbc->no_fbc_reason = "more than one pipe active";
		goto deactivate;
	}

	/* Nothing to cache unless FBC is enabled on this very CRTC. */
	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc);

deactivate:
	intel_fbc_deactivate(dev_priv);
unlock:
	mutex_unlock(&fbc->lock);
}
 
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_reg_params old_params;
 
WARN_ON(!mutex_is_locked(&fbc->lock));
 
if (!fbc->enabled || fbc->crtc != crtc)
return;
 
if (!intel_fbc_can_activate(crtc)) {
WARN_ON(fbc->active);
return;
}
 
old_params = fbc->params;
intel_fbc_get_reg_params(crtc, &fbc->params);
 
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->fbc.crtc == crtc &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->base.y &&
dev_priv->fbc.active)
if (fbc->active &&
intel_fbc_reg_params_equal(&old_params, &fbc->params))
return;
 
if (intel_fbc_is_active(dev_priv)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
* we disable the FBC at the start of the page-flip
* sequence, but also more than one vblank has passed.
*
* For the former case of modeswitching, it is possible
* to switch between two FBC valid configurations
* instantaneously so we do need to disable the FBC
* before we can modify its control registers. We also
* have to wait for the next vblank for that to take
* effect. However, since we delay enabling FBC we can
* assume that a vblank has passed since disabling and
* that we can safely alter the registers in the deferred
* callback.
*
* In the scenario that we go from a valid to invalid
* and then back to valid FBC configuration we have
* no strict enforcement that a vblank occurred since
* disabling the FBC. However, along all current pipe
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("deactivating FBC for update\n");
__intel_fbc_deactivate(dev_priv);
}
 
intel_fbc_deactivate(dev_priv);
intel_fbc_schedule_activation(crtc);
dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
return;
 
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_is_active(dev_priv)) {
DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
__intel_fbc_deactivate(dev_priv);
fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}
}
 
/*
* intel_fbc_update - activate/deactivate FBC as needed
* @crtc: the CRTC that triggered the update
*
* This function reevaluates the overall state and activates or deactivates FBC.
*/
void intel_fbc_update(struct intel_crtc *crtc)
void intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_update(crtc);
mutex_unlock(&dev_priv->fbc.lock);
mutex_lock(&fbc->lock);
__intel_fbc_post_update(crtc);
mutex_unlock(&fbc->lock);
}
 
/* Frontbuffer bits FBC cares about: while enabled, only the tracked CRTC's
 * primary plane; otherwise every framebuffer that could get FBC. */
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (!fbc->enabled)
		return fbc->possible_framebuffer_bits;

	return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
}
 
/*
 * Diff-merge corruption fix: old dev_priv->fbc.* accesses and the old
 * ORIGIN_GTT-only check were interleaved with the new fbc->* / ORIGIN_FLIP
 * version. Reconstructed clean new-revision body.
 *
 * Called on frontbuffer invalidation: records which relevant frontbuffers
 * became dirty and deactivates FBC while they are busy.
 */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* GTT and flip writes are handled elsewhere (hardware tracking /
	 * the post-update path), so they don't mark FBC busy here. */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	/* Only track frontbuffer bits that can affect the FBC plane. */
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv);

	mutex_unlock(&fbc->lock);
}
 
/*
 * Diff-merge corruption fix: old and new condition/branch lines were
 * interleaved and a stray duplicate mutex_unlock(&dev_priv->fbc.lock) was
 * left dangling after the closing brace. Reconstructed clean new-revision
 * body.
 *
 * Called when frontbuffer rendering has completed: clears busy bits and, if
 * the flushed buffers are relevant to FBC, either recompresses (FBC active)
 * or re-runs the post-update path to re-enable it.
 */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* Mirrors the filter in intel_fbc_invalidate(). */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else
			__intel_fbc_post_update(fbc->crtc);
	}

	mutex_unlock(&fbc->lock);
}
/**
* intel_fbc_choose_crtc - select a CRTC to enable FBC on
* @dev_priv: i915 device instance
* @state: the atomic state structure
*
* This function looks at the proposed state for CRTCs and planes, then chooses
* which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
* true.
*
* Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
* enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
*/
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct drm_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	bool fbc_crtc_present = false;
	int i, j;

	mutex_lock(&fbc->lock);

	/* Check whether the CRTC currently bound to FBC (if any) takes part
	 * in this atomic commit. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (fbc->crtc == to_intel_crtc(crtc)) {
			fbc_crtc_present = true;
			break;
		}
	}
	/* This atomic commit doesn't involve the CRTC currently tied to FBC. */
	if (!fbc_crtc_present && fbc->crtc != NULL)
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *intel_plane_state =
			to_intel_plane_state(plane_state);

		/* Invisible planes can't be the FBC plane. */
		if (!intel_plane_state->visible)
			continue;

		/* Find the CRTC state matching this plane's CRTC. */
		for_each_crtc_in_state(state, crtc, crtc_state, j) {
			struct intel_crtc_state *intel_crtc_state =
				to_intel_crtc_state(crtc_state);

			if (plane_state->crtc != crtc)
				continue;

			if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
				break;

			/* First compatible candidate wins; intel_fbc_enable()
			 * will act on this flag later. */
			intel_crtc_state->enable_fbc = true;
			goto out;
		}
	}

out:
	mutex_unlock(&fbc->lock);
}
 
/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @crtc: the CRTC
 *
 * This function checks if the given CRTC was chosen for FBC, then enables it if
 * possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
 */
/*
 * Diff-merge corruption fix: the old revision's checks (VGPU, module
 * parameter, crtc_can_fbc) — which moved to the choose/can_choose path in the
 * new revision — were interleaved with the new logic, leaving duplicated
 * WARN_ONs and unbalanced braces. Reconstructed clean new-revision body.
 */
void intel_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (fbc->enabled) {
		WARN_ON(fbc->crtc == NULL);
		if (fbc->crtc == crtc) {
			/* Re-enabling the same pipe is only valid while FBC
			 * is deactivated. */
			WARN_ON(!crtc->config->enable_fbc);
			WARN_ON(fbc->active);
		}
		goto out;
	}

	/* Only enable on the CRTC that intel_fbc_choose_crtc() picked. */
	if (!crtc->config->enable_fbc)
		goto out;

	WARN_ON(fbc->active);
	WARN_ON(fbc->crtc != NULL);

	intel_fbc_update_state_cache(crtc);
	if (intel_fbc_alloc_cfb(crtc)) {
		fbc->no_fbc_reason = "not enough stolen memory";
		goto out;
	}

	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";

	fbc->enabled = true;
	fbc->crtc = crtc;
out:
	mutex_unlock(&fbc->lock);
}
 
/*
 * __intel_fbc_disable - low level function that disables FBC on the CRTC it
 * is currently associated with; callers must already hold fbc->lock.
 */
/*
 * Diff-merge corruption fix: old dev_priv->fbc.* assertion/assignment lines
 * were interleaved with the new fbc->* versions, and both the old
 * assert_pipe_disabled() and the new WARN_ON(crtc->active) were present.
 * Reconstructed clean new-revision body.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	/* Caller holds the lock and guarantees FBC is enabled but not
	 * active, and the pipe is already off. */
	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);
	WARN_ON(crtc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}
 
/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
void intel_fbc_disable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.crtc == crtc) {
WARN_ON(!dev_priv->fbc.enabled);
WARN_ON(dev_priv->fbc.active);
mutex_lock(&fbc->lock);
if (fbc->crtc == crtc) {
WARN_ON(!fbc->enabled);
WARN_ON(fbc->active);
__intel_fbc_disable(dev_priv);
}
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
 
cancel_work_sync(&fbc->work.work);
}
 
/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_disable(struct drm_i915_private *dev_priv)
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.enabled)
mutex_lock(&fbc->lock);
if (fbc->enabled)
__intel_fbc_disable(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
 
cancel_work_sync(&fbc->work.work);
}
 
/**
* intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
* @dev_priv: i915 device instance
*
* The FBC code needs to track CRTC visibility since the older platforms can't
* have FBC enabled while multiple pipes are used. This function does the
* initial setup at driver load to make sure FBC is matching the real hardware.
*/
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
 
/* Don't even bother tracking anything if we don't need. */
if (!no_fbc_on_multiple_pipes(dev_priv))
return;
 
for_each_intel_crtc(dev_priv->dev, crtc)
if (intel_crtc_active(&crtc->base) &&
to_intel_plane_state(crtc->base.primary->state)->visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
 
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
/*
 * Diff-merge corruption fix: two diff hunk markers were fused into the body,
 * the old per-generation function-pointer setup (dev_priv->fbc.activate etc.,
 * removed in the new revision) was interleaved with kept code, and braces
 * were unbalanced. Reconstructed clean new-revision body.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	enum pipe pipe;

	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;
	fbc->work.scheduled = false;

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* Collect the primary-plane frontbuffer bits FBC may care about;
	 * platforms restricted to pipe A only get that one bit. */
	for_each_pipe(dev_priv, pipe) {
		fbc->possible_framebuffer_bits |=
				INTEL_FRONTBUFFER_PRIMARY(pipe);

		if (fbc_on_pipe_a_only(dev_priv))
			break;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}