Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace Rev 6936 → Rev 6937

/drivers/video/drm/i915/intel_display.c
44,6 → 44,8
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/dma-buf.h>
 
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
186,7 → 188,7
uint32_t clkcfg;
 
/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
return 200;
 
clkcfg = I915_READ(CLKCFG);
214,7 → 216,7
 
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
if (!IS_VALLEYVIEW(dev_priv))
if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;
 
dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
715,11 → 717,12
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
 
if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
!IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
 
if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
1096,7 → 1099,7
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPEDSL(pipe);
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
 
1136,7 → 1139,7
enum pipe pipe = crtc->pipe;
 
if (INTEL_INFO(dev)->gen >= 4) {
int reg = PIPECONF(cpu_transcoder);
i915_reg_t reg = PIPECONF(cpu_transcoder);
 
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1286,7 → 1289,7
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int pp_reg;
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
bool locked = true;
1304,7 → 1307,7
I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
/* XXX: else fix for eDP */
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = VLV_PIPE_PP_CONTROL(pipe);
panel_pipe = pipe;
1348,6 → 1351,7
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum intel_display_power_domain power_domain;
 
/* if we need the pipe quirk it must be always on */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1354,12 → 1358,14
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
 
intel_display_power_put(dev_priv, power_domain);
} else {
cur_state = false;
}
 
I915_STATE_WARN(cur_state != state,
1422,7 → 1428,7
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(SPCNTR(pipe, sprite));
I915_STATE_WARN(val & SP_ENABLE,
1481,8 → 1487,7
return false;
 
if (HAS_PCH_CPT(dev_priv->dev)) {
u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
return false;
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1546,12 → 1551,13
}
 
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg, u32 port_sel)
enum pipe pipe, i915_reg_t reg,
u32 port_sel)
{
u32 val = I915_READ(reg);
I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
i915_mmio_reg_offset(reg), pipe_name(pipe));
 
I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
&& (val & DP_PIPEB_SELECT),
1559,12 → 1565,12
}
 
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
enum pipe pipe, i915_reg_t reg)
{
u32 val = I915_READ(reg);
I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
i915_mmio_reg_offset(reg), pipe_name(pipe));
 
I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
1600,14 → 1606,11
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = pipe_config->dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
/* No really, not for ILK+ */
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev))
assert_panel_unlocked(dev_priv, crtc->pipe);
1645,8 → 1648,6
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
 
mutex_lock(&dev_priv->sb_lock);
 
/* Enable back the 10bit clock to display controller */
1689,7 → 1690,7
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
1838,7 → 1839,7
unsigned int expected_mask)
{
u32 port_mask;
int dpll_reg;
i915_reg_t dpll_reg;
 
switch (dport->port) {
case PORT_B:
1963,7 → 1964,8
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t reg, val, pipeconf_val;
i915_reg_t reg;
uint32_t val, pipeconf_val;
 
/* PCH only available on ILK+ */
BUG_ON(!HAS_PCH_SPLIT(dev));
2052,7 → 2054,8
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
uint32_t reg, val;
i915_reg_t reg;
uint32_t val;
 
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
2069,7 → 2072,7
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
 
if (!HAS_PCH_IBX(dev)) {
if (HAS_PCH_CPT(dev)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
2107,10 → 2110,9
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pch_transcoder;
int reg;
i915_reg_t reg;
u32 val;
 
DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2130,7 → 2132,7
* need the check.
*/
if (HAS_GMCH_DISPLAY(dev_priv->dev))
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
if (crtc->config->has_dsi_encoder)
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
2171,7 → 2173,7
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
int reg;
i915_reg_t reg;
u32 val;
 
DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2270,20 → 2272,20
fb_format_modifier, 0));
}
 
static int
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
struct intel_rotation_info *info = &view->rotation_info;
struct intel_rotation_info *info = &view->params.rotation_info;
unsigned int tile_height, tile_pitch;
 
*view = i915_ggtt_view_normal;
 
if (!plane_state)
return 0;
return;
 
if (!intel_rotation_90_or_270(plane_state->rotation))
return 0;
return;
 
*view = i915_ggtt_view_rotated;
 
2310,8 → 2312,6
info->size_uv = info->width_pages_uv * info->height_pages_uv *
PAGE_SIZE;
}
 
return 0;
}
 
static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2319,7 → 2319,7
if (INTEL_INFO(dev_priv)->gen >= 9)
return 256 * 1024;
else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
IS_VALLEYVIEW(dev_priv))
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return 128 * 1024;
else if (INTEL_INFO(dev_priv)->gen >= 4)
return 4 * 1024;
2330,9 → 2330,7
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request)
const struct drm_plane_state *plane_state)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
2367,9 → 2365,7
return -EINVAL;
}
 
ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
if (ret)
return ret;
intel_fill_fb_ggtt_view(&view, fb, plane_state);
 
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
2388,11 → 2384,10
*/
intel_runtime_pm_get(dev_priv);
 
dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
pipelined_request, &view);
ret = i915_gem_object_pin_to_display_plane(obj, alignment,
&view);
if (ret)
goto err_interruptible;
goto err_pm;
 
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
2418,14 → 2413,12
i915_gem_object_pin_fence(obj);
}
 
dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
return 0;
 
err_unpin:
i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
dev_priv->mm.interruptible = true;
err_pm:
intel_runtime_pm_put(dev_priv);
return ret;
}
2435,12 → 2428,10
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
int ret;
 
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
WARN_ONCE(ret, "Couldn't get view from plane state!");
intel_fill_fb_ggtt_view(&view, fb, plane_state);
 
if (view.type == I915_GGTT_VIEW_NORMAL)
i915_gem_object_unpin_fence(obj);
2695,7 → 2686,7
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg = DSPCNTR(plane);
i915_reg_t reg = DSPCNTR(plane);
int pixel_size;
 
if (!visible || !fb) {
2825,7 → 2816,7
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg = DSPCNTR(plane);
i915_reg_t reg = DSPCNTR(plane);
int pixel_size;
 
if (!visible || !fb) {
2954,22 → 2945,22
struct drm_i915_gem_object *obj,
unsigned int plane)
{
const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
struct i915_ggtt_view view;
struct i915_vma *vma;
u64 offset;
 
if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
view = &i915_ggtt_view_rotated;
intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
intel_plane->base.state);
 
vma = i915_gem_obj_to_ggtt_view(obj, view);
vma = i915_gem_obj_to_ggtt_view(obj, &view);
if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
view->type))
view.type))
return -1;
 
offset = vma->node.start;
 
if (plane == 1) {
offset += vma->ggtt_view.rotation_info.uv_start_page *
offset += vma->ggtt_view.params.rotation_info.uv_start_page *
PAGE_SIZE;
}
 
3199,8 → 3190,8
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->fbc.disable_fbc)
dev_priv->fbc.disable_fbc(dev_priv);
if (dev_priv->fbc.deactivate)
dev_priv->fbc.deactivate(dev_priv);
 
dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
3229,10 → 3220,9
struct intel_plane_state *plane_state;
 
drm_modeset_lock_crtc(crtc, &plane->base);
 
plane_state = to_intel_plane_state(plane->base.state);
 
if (plane_state->base.fb)
if (crtc->state->active && plane_state->base.fb)
plane->commit_plane(&plane->base, plane_state);
 
drm_modeset_unlock_crtc(crtc);
3308,32 → 3298,6
drm_modeset_unlock_all(dev);
}
 
/*
 * Wait for the GPU to finish rendering into @old_fb's backing object before
 * the framebuffer is unpinned.  Non-interruptible: mm.interruptible is
 * temporarily forced off so the wait cannot be aborted by a signal, then
 * restored to its previous value.
 */
static void
intel_finish_fb(struct drm_framebuffer *old_fb)
{
struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
bool was_interruptible = dev_priv->mm.interruptible;
int ret;

/* Big Hammer, we also need to ensure that any pending
 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 * current scanout is retired before unpinning the old
 * framebuffer. Note that we rely on userspace rendering
 * into the buffer attached to the pipe they are waiting
 * on. If not, userspace generates a GPU hang with IPEHR
 * point to the MI_WAIT_FOR_EVENT.
 *
 * This should only fail upon a hung GPU, in which case we
 * can safely continue.
 */
dev_priv->mm.interruptible = false;
ret = i915_gem_object_wait_rendering(obj, true);
dev_priv->mm.interruptible = was_interruptible;

/* Only expected to fail on a hung GPU (see comment above). */
WARN_ON(ret);
}
 
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3403,7 → 3367,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
i915_reg_t reg;
u32 temp;
 
/* enable normal train */
reg = FDI_TX_CTL(pipe);
3445,7 → 3410,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, tries;
i915_reg_t reg;
u32 temp, tries;
 
/* FDI needs bits from pipe first */
assert_pipe_enabled(dev_priv, pipe);
3545,7 → 3511,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i, retry;
i915_reg_t reg;
u32 temp, i, retry;
 
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
3677,7 → 3644,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i, j;
i915_reg_t reg;
u32 temp, i, j;
 
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
3794,9 → 3762,9
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = intel_crtc->pipe;
u32 reg, temp;
i915_reg_t reg;
u32 temp;
 
 
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
3831,7 → 3799,8
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = intel_crtc->pipe;
u32 reg, temp;
i915_reg_t reg;
u32 temp;
 
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
3861,7 → 3830,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
i915_reg_t reg;
u32 temp;
 
/* disable CPU FDI tx and PCH FDI rx */
reg = FDI_TX_CTL(pipe);
3948,21 → 3918,29
drm_crtc_vblank_put(&intel_crtc->base);
 
wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->work);
 
trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);
 
queue_work(dev_priv->wq, &work->work);
}
 
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
long ret;
 
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
 
ret = wait_event_interruptible_timeout(
dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc),
60*HZ) == 0)) {
60*HZ);
 
if (ret < 0)
return ret;
 
if (ret == 0) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
spin_lock_irq(&dev->event_lock);
3973,11 → 3951,22
spin_unlock_irq(&dev->event_lock);
}
 
if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
intel_finish_fb(crtc->primary->fb);
mutex_unlock(&dev->struct_mutex);
return 0;
}
 
/*
 * Stop the LPT iCLKIP clock: gate the pixel clock, then set the disable
 * bit in SSCCTL6 through the sideband interface (SBI).  Sideband accesses
 * are serialized by sb_lock.  Counterpart of lpt_program_iclkip(), which
 * ungates PIXCLK again once the divisors are reprogrammed.
 */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;

/* Gate the pixel clock before touching the SSC control. */
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

mutex_lock(&dev_priv->sb_lock);

/* Read-modify-write SSCCTL6 over SBI to assert the disable bit. */
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

mutex_unlock(&dev_priv->sb_lock);
}
 
/* Program iCLKIP clock to the desired frequency */
3989,19 → 3978,8
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
 
mutex_lock(&dev_priv->sb_lock);
lpt_disable_iclkip(dev_priv);
 
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
*/
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
 
/* Disable SSCCTL */
intel_sbi_write(dev_priv, SBI_SSCCTL6,
intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
SBI_SSCCTL_DISABLE,
SBI_ICLK);
 
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
if (clock == 20000) {
auxdiv = 1;
4018,7 → 3996,7
u32 iclk_pi_range = 64;
u32 desired_divisor, msb_divisor_value, pi_value;
 
desired_divisor = (iclk_virtual_root_freq / clock);
desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
msb_divisor_value = desired_divisor / iclk_pi_range;
pi_value = desired_divisor % iclk_pi_range;
 
4040,6 → 4018,8
phasedir,
phaseinc);
 
mutex_lock(&dev_priv->sb_lock);
 
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4061,12 → 4041,12
temp &= ~SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
 
mutex_unlock(&dev_priv->sb_lock);
 
/* Wait for initialization time */
udelay(24);
 
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
 
mutex_unlock(&dev_priv->sb_lock);
}
 
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4137,6 → 4117,22
}
}
 
/*
 * Return which DP Port should be selected for Transcoder DP control.
 *
 * Walks the encoders attached to @crtc and returns the digital port of the
 * first DisplayPort or eDP encoder found.  Returns -1 if the crtc drives no
 * DP encoder.
 *
 * NOTE(review): -1 is returned through an enum port; callers (see the
 * TRANS_DP_PORT_SEL switch) treat any non-B/C/D value as an error via their
 * default case — confirm no caller compares against PORT_* directly.
 */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;

for_each_encoder_on_crtc(dev, crtc, encoder) {
if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
encoder->type == INTEL_OUTPUT_EDP)
return enc_to_dig_port(&encoder->base)->port;
}

return -1;
}
 
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
4151,7 → 4147,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
u32 temp;
 
assert_pch_transcoder_disabled(dev_priv, pipe);
 
4163,6 → 4159,12
I915_WRITE(FDI_RX_TUSIZE1(pipe),
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
/*
* Sometimes spurious CPU pipe underruns happen during FDI
* training, at least with VGA+HDMI cloning. Suppress them.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
 
4196,10 → 4198,14
 
intel_fdi_normal_train(crtc);
 
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
i915_reg_t reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
4207,19 → 4213,19
temp |= TRANS_DP_OUTPUT_ENABLE;
temp |= bpc << 9; /* same format but at 11:9 */
 
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
 
switch (intel_trans_dp_port_sel(crtc)) {
case PCH_DP_B:
case PORT_B:
temp |= TRANS_DP_PORT_SEL_B;
break;
case PCH_DP_C:
case PORT_C:
temp |= TRANS_DP_PORT_SEL_C;
break;
case PCH_DP_D:
case PORT_D:
temp |= TRANS_DP_PORT_SEL_D;
break;
default:
4359,7 → 4365,7
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dslreg = PIPEDSL(pipe);
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
 
temp = I915_READ(dslreg);
4652,7 → 4658,7
return;
 
if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
if (intel_crtc->config->has_dsi_encoder)
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
4669,7 → 4675,7
}
 
for (i = 0; i < 256; i++) {
u32 palreg;
i915_reg_t palreg;
 
if (HAS_GMCH_DISPLAY(dev))
palreg = PALETTE(pipe, i);
4723,14 → 4729,6
int pipe = intel_crtc->pipe;
 
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (IS_BROADWELL(dev))
intel_wait_for_vblank(dev, pipe);
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
4748,9 → 4746,9
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
/* Underruns don't raise interrupts, so check manually. */
if (HAS_GMCH_DISPLAY(dev))
i9xx_check_fifo_underruns(dev_priv);
/* Underruns don't always raise interrupts, so check manually. */
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
}
 
/**
4807,9 → 4805,9
static void intel_post_plane_update(struct intel_crtc *crtc)
{
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_plane *plane;
 
if (atomic->wait_vblank)
intel_wait_for_vblank(dev, crtc->pipe);
4816,22 → 4814,17
 
intel_frontbuffer_flip(dev, atomic->fb_bits);
 
if (atomic->disable_cxsr)
crtc->wm.cxsr_allowed = true;
 
if (crtc->atomic.update_wm_post)
if (pipe_config->update_wm_post && pipe_config->base.active)
intel_update_watermarks(&crtc->base);
 
if (atomic->update_fbc)
intel_fbc_update(dev_priv);
intel_fbc_update(crtc);
 
if (atomic->post_enable_primary)
intel_post_enable_primary(&crtc->base);
 
drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
intel_update_sprite_watermarks(plane, &crtc->base,
0, 0, 0, false, false);
 
memset(atomic, 0, sizeof(*atomic));
}
 
4840,23 → 4833,11
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct drm_plane *p;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
 
/* Track fb's for any planes being disabled */
drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
struct intel_plane *plane = to_intel_plane(p);
 
mutex_lock(&dev->struct_mutex);
i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
plane->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
}
 
if (atomic->wait_for_flips)
intel_crtc_wait_for_pending_flips(&crtc->base);
 
if (atomic->disable_fbc)
intel_fbc_disable_crtc(crtc);
intel_fbc_deactivate(crtc);
 
if (crtc->atomic.disable_ips)
hsw_disable_ips(crtc);
4864,10 → 4845,13
if (atomic->pre_disable_primary)
intel_pre_disable_primary(&crtc->base);
 
if (atomic->disable_cxsr) {
if (pipe_config->disable_cxsr) {
crtc->wm.cxsr_allowed = false;
intel_set_memory_cxsr(dev_priv, false);
}
 
if (!needs_modeset(&pipe_config->base) && pipe_config->update_wm_pre)
intel_update_watermarks(&crtc->base);
}
 
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4902,6 → 4886,9
return;
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
 
if (intel_crtc->config->has_dp_encoder)
4919,7 → 4906,6
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
4957,6 → 4943,13
 
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
 
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
if (intel_crtc->config->has_pch_encoder)
intel_wait_for_vblank(dev, pipe);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
intel_fbc_enable(intel_crtc);
}
 
/* IPS only exists on ULT machines and is tied to pipe A. */
4974,11 → 4967,14
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
if (WARN_ON(intel_crtc->active))
return;
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
 
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_enable_shared_dpll(intel_crtc);
 
5003,21 → 4999,20
 
intel_crtc->active = true;
 
if (intel_crtc->config->has_pch_encoder)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
else
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
if (encoder->pre_enable)
encoder->pre_enable(encoder);
}
 
if (intel_crtc->config->has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
if (intel_crtc->config->has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
}
 
if (!is_dsi)
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_enable_pipe_clock(intel_crtc);
 
if (INTEL_INFO(dev)->gen >= 9)
5032,7 → 5027,7
intel_crtc_load_lut(crtc);
 
intel_ddi_set_pipe_settings(crtc);
if (!is_dsi)
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_enable_transcoder_func(crtc);
 
intel_update_watermarks(crtc);
5041,7 → 5036,7
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
 
if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, true);
 
assert_vblank_disabled(crtc);
5052,6 → 5047,14
intel_opregion_notify_encoder(encoder, true);
}
 
if (intel_crtc->config->has_pch_encoder) {
intel_wait_for_vblank(dev, pipe);
intel_wait_for_vblank(dev, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
}
 
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
5059,6 → 5062,8
intel_wait_for_vblank(dev, hsw_workaround_pipe);
intel_wait_for_vblank(dev, hsw_workaround_pipe);
}
 
intel_fbc_enable(intel_crtc);
}
 
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5083,8 → 5088,10
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 reg, temp;
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
5091,15 → 5098,22
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
/*
* Sometimes spurious CPU pipe underruns happen when the
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_disable_pipe(intel_crtc);
 
ironlake_pfit_disable(intel_crtc, false);
 
if (intel_crtc->config->has_pch_encoder)
if (intel_crtc->config->has_pch_encoder) {
ironlake_fdi_disable(crtc);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
5109,6 → 5123,9
ironlake_disable_pch_transcoder(dev_priv, pipe);
 
if (HAS_PCH_CPT(dev)) {
i915_reg_t reg;
u32 temp;
 
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
5125,6 → 5142,10
 
ironlake_fdi_pll_disable(intel_crtc);
}
 
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
intel_fbc_disable_crtc(intel_crtc);
}
 
static void haswell_crtc_disable(struct drm_crtc *crtc)
5134,8 → 5155,11
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
5144,15 → 5168,12
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
intel_disable_pipe(intel_crtc);
 
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
 
if (!is_dsi)
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
if (INTEL_INFO(dev)->gen >= 9)
5160,17 → 5181,23
else
ironlake_pfit_disable(intel_crtc, false);
 
if (!is_dsi)
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_disable_pipe_clock(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (intel_crtc->config->has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
lpt_disable_iclkip(dev_priv);
intel_ddi_fdi_disable(crtc);
 
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
intel_fbc_disable_crtc(intel_crtc);
}
 
static void i9xx_pfit_enable(struct intel_crtc *crtc)
5201,15 → 5228,15
{
switch (port) {
case PORT_A:
return POWER_DOMAIN_PORT_DDI_A_4_LANES;
return POWER_DOMAIN_PORT_DDI_A_LANES;
case PORT_B:
return POWER_DOMAIN_PORT_DDI_B_4_LANES;
return POWER_DOMAIN_PORT_DDI_B_LANES;
case PORT_C:
return POWER_DOMAIN_PORT_DDI_C_4_LANES;
return POWER_DOMAIN_PORT_DDI_C_LANES;
case PORT_D:
return POWER_DOMAIN_PORT_DDI_D_4_LANES;
return POWER_DOMAIN_PORT_DDI_D_LANES;
case PORT_E:
return POWER_DOMAIN_PORT_DDI_E_2_LANES;
return POWER_DOMAIN_PORT_DDI_E_LANES;
default:
MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
5236,10 → 5263,6
}
}
 
/*
 * Iterate @domain over every power domain whose bit is set in @mask.
 * NOTE(review): uses a plain int shift (1 << domain) — assumes
 * POWER_DOMAIN_NUM stays below the width of int; confirm if domains grow.
 */
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
 
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
5304,13 → 5327,11
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
unsigned long mask;
enum transcoder transcoder;
enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
 
if (!crtc->state->active)
return 0;
 
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (intel_crtc->config->pch_pfit.enabled ||
5397,7 → 5418,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
 
if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5454,7 → 5475,7
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* Program the gmbus_freq based on the cdclk frequency.
* BSpec erroneously claims we should aim for 4MHz, but
5814,32 → 5835,16
if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
DRM_ERROR("DBuf power disable timeout\n");
 
/*
* DMC assumes ownership of LCPLL and will get confused if we touch it.
*/
if (dev_priv->csr.dmc_payload) {
/* disable DPLL0 */
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
~LCPLL_PLL_ENABLE);
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
DRM_ERROR("Couldn't disable DPLL0\n");
}
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
 
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
u32 val;
unsigned int required_vco;
 
/* enable PCH reset handshake */
val = I915_READ(HSW_NDE_RSTWRN_OPT);
I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
 
/* enable PG1 and Misc I/O */
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 
/* DPLL0 not enabled (happens on early BIOS versions) */
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
/* enable DPLL0 */
5860,6 → 5865,45
DRM_ERROR("DBuf power enable timeout\n");
}
 
/*
 * Check whether the CDCLK/DPLL0 state left behind by the pre-OS firmware
 * (BIOS/GOP) is usable, and reprogram it via skl_init_cdclk() if not.
 *
 * Returns true if the clock state had to be sanitized (reprogrammed),
 * false if the firmware-programmed state was left untouched.
 */
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	/* Snapshot the PLL and CDCLK control registers up front. */
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	int freq = dev_priv->skl_boot_cdclk;

	/*
	 * check if the pre-os intialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	/* Is PLL enabled and locked ? */
	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
		/* All well; nothing to sanitize */
		return false;
sanitize:
	/*
	 * As of now initialize with max cdclk till
	 * we get dynamic cdclk support
	 * */
	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
	skl_init_cdclk(dev_priv);

	/* we did have to sanitize */
	return true;
}
 
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
6141,13 → 6185,10
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
bool is_dsi;
 
if (WARN_ON(intel_crtc->active))
return;
 
is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
 
6170,7 → 6211,7
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
if (!is_dsi) {
if (!intel_crtc->config->has_dsi_encoder) {
if (IS_CHERRYVIEW(dev)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
6188,6 → 6229,7
 
intel_crtc_load_lut(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
 
assert_vblank_disabled(crtc);
6249,6 → 6291,8
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
intel_fbc_enable(intel_crtc);
}
 
static void i9xx_pfit_disable(struct intel_crtc *crtc)
6296,7 → 6340,7
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
if (!intel_crtc->config->has_dsi_encoder) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
6311,6 → 6355,8
 
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_fbc_disable_crtc(intel_crtc);
}
 
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6324,7 → 6370,8
return;
 
if (to_intel_plane_state(crtc->primary->state)->visible) {
intel_crtc_wait_for_pending_flips(crtc);
WARN_ON(intel_crtc->unpin_work);
 
intel_pre_disable_primary(crtc);
 
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6449,13 → 6496,11
 
int intel_connector_init(struct intel_connector *connector)
{
struct drm_connector_state *connector_state;
drm_atomic_helper_connector_reset(&connector->base);
 
connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
if (!connector_state)
if (!connector->base.state)
return -ENOMEM;
 
connector->base.state = connector_state;
return 0;
}
 
6644,6 → 6689,15
pipe_config_supports_ips(dev_priv, pipe_config);
}
 
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
/* GDG double wide on either pipe, otherwise pipe A only */
return INTEL_INFO(dev_priv)->gen < 4 &&
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
 
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
6653,24 → 6707,25
 
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
int clock_limit = dev_priv->max_cdclk_freq;
int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
 
/*
* Enable pixel doubling when the dot clock
* Enable double wide mode when the dot clock
* is > 90% of the (display) core speed.
*
* GDG double wide on either pipe,
* otherwise pipe A only.
*/
if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
if (intel_crtc_supports_double_wide(crtc) &&
adjusted_mode->crtc_clock > clock_limit) {
clock_limit *= 2;
pipe_config->double_wide = true;
}
 
if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
if (adjusted_mode->crtc_clock > clock_limit) {
DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
adjusted_mode->crtc_clock, clock_limit,
yesno(pipe_config->double_wide));
return -EINVAL;
}
}
 
/*
* Pipe horizontal size must be even in:
7146,7 → 7201,7
 
WARN_ON(!crtc_state->base.state);
 
if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
refclk = 100000;
} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7434,7 → 7489,7
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
int dpll_reg = DPLL(crtc->pipe);
i915_reg_t dpll_reg = DPLL(crtc->pipe);
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7845,7 → 7900,7
pipeconf |= PIPECONF_DOUBLE_WIDE;
 
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
7885,7 → 7940,8
} else
pipeconf |= PIPECONF_PROGRESSIVE;
 
if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
 
I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7900,8 → 7956,6
int refclk, num_connectors = 0;
intel_clock_t clock;
bool ok;
bool is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector *connector;
7911,26 → 7965,14
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
 
if (crtc_state->has_dsi_encoder)
return 0;
 
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
 
encoder = to_intel_encoder(connector_state->best_encoder);
 
switch (encoder->type) {
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
default:
break;
}
 
if (connector_state->crtc == &crtc->base)
num_connectors++;
}
 
if (is_dsi)
return 0;
 
if (!crtc_state->clock_set) {
refclk = i9xx_get_refclk(crtc_state, num_connectors);
 
8133,20 → 8175,24
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
ret = false;
 
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
return false;
goto out;
 
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
pipe_config->pipe_bpp = 18;
8162,7 → 8208,8
}
}
 
if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
 
if (INTEL_INFO(dev)->gen < 4)
8190,7 → 8237,7
pipe_config->pixel_multiplier = 1;
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev)) {
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
/*
* DPLL_DVO_2X_MODE must be enabled for both DPLLs
* on 830. Filter it out here so that we don't
8223,7 → 8270,12
pipe_config->base.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
 
return true;
ret = true;
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static void ironlake_init_pch_refclk(struct drm_device *dev)
8230,7 → 8282,6
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
int i;
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
8237,7 → 8288,6
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
bool using_ssc_source = false;
 
/* We need to take the global config into account */
for_each_intel_encoder(dev, encoder) {
8264,23 → 8314,9
can_ssc = true;
}
 
/* Check if any DPLLs are using the SSC source */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
u32 temp = I915_READ(PCH_DPLL(i));
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
has_panel, has_lvds, has_ck505);
 
if (!(temp & DPLL_VCO_ENABLE))
continue;
 
if ((temp & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
using_ssc_source = true;
break;
}
}
 
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
has_panel, has_lvds, has_ck505, using_ssc_source);
 
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
* PCH B stepping, previous chipset stepping should be
8316,9 → 8352,9
final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
} else if (using_ssc_source) {
final |= DREF_SSC_SOURCE_ENABLE;
final |= DREF_SSC1_ENABLE;
} else {
final |= DREF_SSC_SOURCE_DISABLE;
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
}
 
if (final == val)
8364,7 → 8400,7
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
DRM_DEBUG_KMS("Disabling CPU source output\n");
DRM_DEBUG_KMS("Disabling SSC entirely\n");
 
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
8375,9 → 8411,6
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
 
if (!using_ssc_source) {
DRM_DEBUG_KMS("Disabling SSC source\n");
 
/* Turn off the SSC source */
val &= ~DREF_SSC_SOURCE_MASK;
val |= DREF_SSC_SOURCE_DISABLE;
8389,7 → 8422,6
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
}
 
BUG_ON(val != final);
}
8562,6 → 8594,67
mutex_unlock(&dev_priv->sb_lock);
}
 
/* Map a bend amount (steps of 5, range -50..+50) to a table index (0..20). */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE register values indexed by BEND_IDX(steps). Adjacent
 * entries (e.g. 50/45, 40/35) share a value; the half-step difference is
 * produced by the SSCDITHPHASE dithering programmed in lpt_bend_clkout_dp().
 */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	/* Reject bends that are not a multiple of 5 or out of table range. */
	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	/* Sideband (SBI) accesses are serialized under sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/*
	 * Odd multiples of 5 fall between two table entries; enable the
	 * dither pattern to realize the intermediate value.
	 */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Read-modify-write only the low 16 bits of SSCDIVINTPHASE. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
 
static void lpt_init_pch_refclk(struct drm_device *dev)
{
struct intel_encoder *encoder;
8577,11 → 8670,13
}
}
 
if (has_vga)
if (has_vga) {
lpt_bend_clkout_dp(to_i915(dev), 0);
lpt_enable_clkout_dp(dev, true, true);
else
} else {
lpt_disable_clkout_dp(dev);
}
}
 
/*
* Initialize reference clocks when the driver loads
8943,7 → 9038,7
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
 
is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
 
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
9284,18 → 9379,21
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
return false;
goto out;
 
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
9358,7 → 9456,12
 
ironlake_get_pfit_config(crtc, pipe_config);
 
return true;
ret = true;
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9372,8 → 9475,8
 
I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
9719,14 → 9822,10
else
cdclk = 337500;
 
/*
* FIXME move the cdclk caclulation to
* compute_config() so we can fail gracegully.
*/
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
cdclk, dev_priv->max_cdclk_freq);
cdclk = dev_priv->max_cdclk_freq;
return -EINVAL;
}
 
to_intel_atomic_state(state)->cdclk = cdclk;
9821,6 → 9920,7
break;
case PORT_CLK_SEL_SPLL:
pipe_config->shared_dpll = DPLL_ID_SPLL;
break;
}
}
 
9837,7 → 9937,7
 
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_BROXTON(dev))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
9873,13 → 9973,18
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain pfit_domain;
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
uint32_t tmp;
bool ret;
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
power_domain_mask = BIT(power_domain);
 
ret = false;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
9905,13 → 10010,14
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
goto out;
power_domain_mask |= BIT(power_domain);
 
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
if (!(tmp & PIPECONF_ENABLE))
return false;
goto out;
 
haswell_get_ddi_port_state(crtc, pipe_config);
 
9921,14 → 10027,14
skl_init_scalers(dev, crtc, pipe_config);
}
 
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
 
if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
 
if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT(power_domain);
if (INTEL_INFO(dev)->gen >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
9946,7 → 10052,13
pipe_config->pixel_multiplier = 1;
}
 
return true;
ret = true;
 
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
10178,10 → 10290,8
int ret;
 
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
drm_gem_object_unreference(&obj->base);
if (!intel_fb)
return ERR_PTR(-ENOMEM);
}
 
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret)
10188,10 → 10298,9
goto err;
 
return &intel_fb->base;
 
err:
drm_gem_object_unreference(&obj->base);
kfree(intel_fb);
 
return ERR_PTR(ret);
}
 
10231,6 → 10340,7
struct drm_display_mode *mode,
int depth, int bpp)
{
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
10245,7 → 10355,11
bpp);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
 
return intel_framebuffer_create(dev, &mode_cmd, obj);
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
drm_gem_object_unreference_unlocked(&obj->base);
 
return fb;
}
 
static struct drm_framebuffer *
10780,7 → 10894,7
spin_unlock_irq(&dev->event_lock);
 
if (work) {
cancel_work_sync(&work->work);
// cancel_work_sync(&work->work);
kfree(work);
}
 
11148,7 → 11262,7
*/
if (ring->id == RCS) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, DERRMR);
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
11158,7 → 11272,7
else
intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
if (IS_GEN8(dev)) {
intel_ring_emit(ring, 0);
11198,11 → 11312,16
return true;
else if (i915.enable_execlists)
return true;
// else if (obj->base.dma_buf &&
// !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
// false))
// return true;
else
return ring != i915_gem_request_get_ring(obj->last_write_req);
}
 
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
unsigned int rotation,
struct intel_unpin_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
11209,7 → 11328,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
const enum pipe pipe = intel_crtc->pipe;
u32 ctl, stride;
u32 ctl, stride, tile_height;
 
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
11233,9 → 11352,16
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0], 0);
stride = DIV_ROUND_UP(fb->height, tile_height);
} else {
stride = fb->pitches[0] /
intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
}
 
/*
* Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11256,10 → 11382,9
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
i915_reg_t reg = DSPCNTR(intel_crtc->plane);
u32 dspcntr;
u32 reg;
 
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
 
if (obj->tiling_mode != I915_TILING_NONE)
11293,7 → 11418,7
intel_pipe_update_start(crtc);
 
if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
skl_do_mmio_flip(crtc, work);
skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
else
/* use_mmio_flip() retricts MMIO flips to ilk+ */
ilk_do_mmio_flip(crtc, work);
11305,6 → 11430,9
{
struct intel_mmio_flip *mmio_flip =
container_of(work, struct intel_mmio_flip, work);
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
 
if (mmio_flip->req) {
WARN_ON(__i915_wait_request(mmio_flip->req,
11314,6 → 11442,12
i915_gem_request_unreference__unlocked(mmio_flip->req);
}
 
/* For framebuffer backed by dmabuf, wait for fence */
// if (obj->base.dma_buf)
// WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
// false, false,
// MAX_SCHEDULE_TIMEOUT) < 0);
 
intel_do_mmio_flip(mmio_flip);
kfree(mmio_flip);
}
11320,10 → 11454,7
 
static int intel_queue_mmio_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
struct drm_i915_gem_object *obj)
{
struct intel_mmio_flip *mmio_flip;
 
11334,6 → 11465,7
mmio_flip->i915 = to_i915(dev);
mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
mmio_flip->crtc = to_intel_crtc(crtc);
mmio_flip->rotation = crtc->primary->state->rotation;
 
INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
schedule_work(&mmio_flip->work);
11400,6 → 11532,8
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
 
WARN_ON(!in_interrupt());
 
if (crtc == NULL)
return;
 
11515,7 → 11649,7
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
11537,9 → 11671,14
* synchronisation, so all we want here is to pin the framebuffer
* into the display plane and skip any waits.
*/
if (!mmio_flip) {
ret = i915_gem_object_sync(obj, ring, &request);
if (ret)
goto cleanup_pending;
}
 
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
crtc->primary->state,
mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
crtc->primary->state);
if (ret)
goto cleanup_pending;
 
11548,8 → 11687,7
work->gtt_offset += intel_crtc->dspaddr_offset;
 
if (mmio_flip) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
ret = intel_queue_mmio_flip(dev, crtc, obj);
if (ret)
goto cleanup_unpin;
 
11580,7 → 11718,7
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
 
intel_fbc_disable_crtc(intel_crtc);
intel_fbc_deactivate(intel_crtc);
intel_frontbuffer_flip_prepare(dev,
to_intel_plane(primary)->frontbuffer_bit);
 
11663,21 → 11801,41
static bool intel_wm_need_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
/* Update watermarks on tiling changes. */
if (!plane->state->fb || !state->fb ||
plane->state->fb->modifier[0] != state->fb->modifier[0] ||
plane->state->rotation != state->rotation)
struct intel_plane_state *new = to_intel_plane_state(state);
struct intel_plane_state *cur = to_intel_plane_state(plane->state);
 
/* Update watermarks on tiling or size changes. */
if (new->visible != cur->visible)
return true;
 
if (plane->state->crtc_w != state->crtc_w)
if (!cur->base.fb || !new->base.fb)
return false;
 
if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
cur->base.rotation != new->base.rotation ||
drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
return true;
 
return false;
}
 
/*
 * Return true if the plane state scales: i.e. the (16.16 fixed point)
 * source rectangle size differs from the destination rectangle size.
 */
static bool needs_scaling(struct intel_plane_state *state)
{
	return drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst) ||
		drm_rect_height(&state->src) >> 16 != drm_rect_height(&state->dst);
}
 
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
struct drm_crtc *crtc = crtc_state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *plane = plane_state->plane;
11690,7 → 11848,6
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
 
bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;
 
11703,14 → 11860,6
return ret;
}
 
/*
* Disabling a plane is always okay; we just need to update
* fb tracking in a special way since cleanup_fb() won't
* get called by the plane helpers.
*/
if (old_plane_state->base.fb && !fb)
intel_crtc->atomic.disabled_planes |= 1 << i;
 
was_visible = old_plane_state->visible;
visible = to_intel_plane_state(plane_state)->visible;
 
11734,24 → 11883,24
turn_off, turn_on, mode_changed);
 
if (turn_on) {
intel_crtc->atomic.update_wm_pre = true;
pipe_config->update_wm_pre = true;
 
/* must disable cxsr around plane enable/disable */
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
intel_crtc->atomic.disable_cxsr = true;
/* to potentially re-enable cxsr */
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.update_wm_post = true;
}
if (plane->type != DRM_PLANE_TYPE_CURSOR)
pipe_config->disable_cxsr = true;
} else if (turn_off) {
intel_crtc->atomic.update_wm_post = true;
pipe_config->update_wm_post = true;
 
/* must disable cxsr around plane enable/disable */
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
if (is_crtc_enabled)
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.disable_cxsr = true;
pipe_config->disable_cxsr = true;
}
} else if (intel_wm_need_update(plane, plane_state)) {
intel_crtc->atomic.update_wm_pre = true;
/* FIXME bollocks */
pipe_config->update_wm_pre = true;
pipe_config->update_wm_post = true;
}
 
if (visible || was_visible)
11760,7 → 11909,6
 
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
intel_crtc->atomic.wait_for_flips = true;
intel_crtc->atomic.pre_disable_primary = turn_off;
intel_crtc->atomic.post_enable_primary = turn_on;
 
11808,11 → 11956,23
case DRM_PLANE_TYPE_CURSOR:
break;
case DRM_PLANE_TYPE_OVERLAY:
if (turn_off && !mode_changed) {
/*
* WaCxSRDisabledForSpriteScaling:ivb
*
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
if (IS_IVYBRIDGE(dev) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state)) {
to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
} else if (turn_off && !mode_changed) {
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.update_sprite_watermarks |=
1 << i;
}
 
break;
}
return 0;
}
11885,7 → 12045,7
}
 
if (mode_changed && !crtc_state->active)
intel_crtc->atomic.update_wm_post = true;
pipe_config->update_wm_post = true;
 
if (mode_changed && crtc_state->enable &&
dev_priv->display.crtc_compute_clock &&
11897,6 → 12057,12
}
 
ret = 0;
if (dev_priv->display.compute_pipe_wm) {
ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
if (ret)
return ret;
}
 
if (INTEL_INFO(dev)->gen >= 9) {
if (mode_changed)
ret = skl_update_scaler_crtc(pipe_config);
11952,13 → 12118,23
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
 
/* Clamp bpp to 8 on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0 && bpp > 24) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
bpp);
pipe_config->pipe_bpp = 24;
/* Clamp bpp to default limit on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0) {
int type = connector->base.connector_type;
int clamp_bpp = 24;
 
/* Fall back to 18 bpp when DP sink capability is unknown. */
if (type == DRM_MODE_CONNECTOR_DisplayPort ||
type == DRM_MODE_CONNECTOR_eDP)
clamp_bpp = 18;
 
if (bpp > clamp_bpp) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
bpp, clamp_bpp);
pipe_config->pipe_bpp = clamp_bpp;
}
}
}
 
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11970,7 → 12146,7
struct drm_connector_state *connector_state;
int bpp, i;
 
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)))
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
bpp = 10*3;
else if (INTEL_INFO(dev)->gen >= 5)
bpp = 12*3;
12086,7 → 12262,7
pipe_config->dpll_hw_state.pll9,
pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
} else if (IS_SKYLAKE(dev)) {
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
pipe_config->ddi_pll_sel,
12344,8 → 12520,20
crtc->hwmode = crtc->state->adjusted_mode;
else
crtc->hwmode.crtc_clock = 0;
 
/*
* Update legacy state to satisfy fbc code. This can
* be removed when fbc uses the atomic state.
*/
if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
struct drm_plane_state *plane_state = crtc->primary->state;
 
crtc->primary->fb = plane_state->fb;
crtc->x = plane_state->src_x >> 16;
crtc->y = plane_state->src_y >> 16;
}
}
}
 
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
12369,7 → 12557,7
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
base.head) \
if (mask & (1 <<(intel_crtc)->pipe))
for_each_if (mask & (1 <<(intel_crtc)->pipe))
 
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
12553,6 → 12741,8
} else
PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
PIPE_CONF_CHECK_I(has_dsi_encoder);
 
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12570,7 → 12760,7
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
IS_VALLEYVIEW(dev))
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
PIPE_CONF_CHECK_I(limited_color_range);
PIPE_CONF_CHECK_I(has_infoframe);
 
13107,6 → 13297,45
return 0;
}
 
/*
 * Handle calculation of various watermark data at the end of the atomic check
 * phase. The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 */
static void calc_watermark_data(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_plane *plane;
	struct drm_plane_state *pstate;

	/* Count active pipes, preferring the state in this transaction. */
	drm_for_each_crtc(crtc, dev) {
		cstate = drm_atomic_get_existing_crtc_state(state, crtc);
		if (!cstate)
			cstate = crtc->state;

		if (cstate->active)
			intel_state->wm_config.num_pipes_active++;
	}

	/* Record whether any visible sprite planes exist, and whether any scale. */
	drm_for_each_legacy_plane(plane, dev) {
		pstate = drm_atomic_get_existing_plane_state(state, plane);
		if (!pstate)
			pstate = plane->state;

		if (!to_intel_plane_state(pstate)->visible)
			continue;

		intel_state->wm_config.sprites_enabled = true;

		/* src_w/src_h are 16.16 fixed point; compare against dest size. */
		if (pstate->crtc_w != pstate->src_w >> 16 ||
		    pstate->crtc_h != pstate->src_h >> 16)
			intel_state->wm_config.sprites_scaled = true;
	}
}
 
/**
* intel_atomic_check - validate state object
* @dev: drm device
13115,6 → 13344,7
static int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int ret, i;
13182,12 → 13412,86
if (ret)
return ret;
} else
to_intel_atomic_state(state)->cdclk =
to_i915(state->dev)->cdclk_freq;
intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
 
return drm_atomic_helper_check_planes(state->dev, state);
ret = drm_atomic_helper_check_planes(state->dev, state);
if (ret)
return ret;
 
calc_watermark_data(state);
 
return 0;
}
 
/*
 * intel_atomic_prepare_commit - prepare an atomic state for committing
 * @dev: drm device
 * @state: the atomic state being committed
 * @async: request a nonblocking commit (currently unsupported, -EINVAL)
 *
 * Waits for pending page flips on the affected crtcs, pins/prepares all
 * plane framebuffers, and then waits (without holding struct_mutex) for
 * any outstanding GPU rendering into those framebuffers to complete.
 *
 * Returns 0 on success, negative error code on failure.  On success the
 * caller is responsible for eventually cleaning up the prepared planes;
 * on failure all prepare work done here has already been undone.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state,
				       bool async)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;

	/* Nonblocking commits are not implemented yet. */
	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/* Legacy cursor updates may race with pending flips. */
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		/* NOTE(review): workqueue flush disabled in this port. */
//		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
//			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
		u32 reset_counter;

		/*
		 * Sample the reset counter, then drop struct_mutex so the
		 * request waits below don't block GPU reset handling.
		 */
		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
		mutex_unlock(&dev->struct_mutex);

		for_each_plane_in_state(state, plane, plane_state, i) {
			struct intel_plane_state *intel_plane_state =
				to_intel_plane_state(plane_state);

			/* Only planes with outstanding rendering need a wait. */
			if (!intel_plane_state->wait_req)
				continue;

			ret = __i915_wait_request(intel_plane_state->wait_req,
						  reset_counter, true,
						  NULL, NULL);

			/* Swallow -EIO errors to allow updates during hw lockup. */
			if (ret == -EIO)
				ret = 0;

			if (ret)
				break;
		}

		/* Success path: struct_mutex is already unlocked here. */
		if (!ret)
			return 0;

		/* A wait failed: re-take the lock and undo prepare_planes. */
		mutex_lock(&dev->struct_mutex);
		drm_atomic_helper_cleanup_planes(dev, state);
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}
 
/**
* intel_atomic_commit - commit validated state object
* @dev: DRM device
13209,22 → 13513,20
bool async)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int ret = 0;
int i;
bool any_ms = false;
 
if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
return -EINVAL;
ret = intel_atomic_prepare_commit(dev, state, async);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
return ret;
}
 
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
 
drm_atomic_helper_swap_state(dev, state);
dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13240,6 → 13542,16
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_disable_shared_dpll(intel_crtc);
 
/*
* Underruns don't always raise
* interrupts, so check manually.
*/
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
 
if (!crtc->state->active)
intel_update_watermarks(crtc);
}
}
 
13262,6 → 13574,9
to_intel_crtc_state(crtc->state)->update_pipe;
unsigned long put_domains = 0;
 
if (modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
if (modeset && crtc->state->active) {
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
13277,6 → 13592,8
if (!modeset)
intel_pre_plane_update(intel_crtc);
 
if (crtc->state->active &&
(crtc->state->planes_changed || update_pipe))
drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 
if (put_domains)
13283,12 → 13600,18
modeset_put_power_domains(dev_priv, put_domains);
 
intel_post_plane_update(intel_crtc);
 
if (modeset)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
}
 
/* FIXME: add subpixel order */
 
drm_atomic_helper_wait_for_vblanks(dev, state);
 
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
 
if (any_ms)
intel_modeset_check_state(dev, state);
13353,7 → 13676,7
{
uint32_t val;
 
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(PCH_DPLL(pll->id));
13361,6 → 13684,8
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 
return val & DPLL_VCO_ENABLE;
}
 
13457,6 → 13782,8
* bits. Some older platforms need special physical address handling for
* cursor planes.
*
* Must be called with struct_mutex held.
*
* Returns 0 on success, negative error code on failure.
*/
int
13467,29 → 13794,61
struct drm_framebuffer *fb = new_state->fb;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret = 0;
 
if (!obj)
if (!obj && !old_obj)
return 0;
 
mutex_lock(&dev->struct_mutex);
if (old_obj) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
 
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer. Note that we rely on userspace rendering
* into the buffer attached to the pipe they are waiting
* on. If not, userspace generates a GPU hang with IPEHR
* point to the MI_WAIT_FOR_EVENT.
*
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
if (needs_modeset(crtc_state))
ret = i915_gem_object_wait_rendering(old_obj, true);
 
/* Swallow -EIO errors to allow updates during hw lockup. */
if (ret && ret != -EIO)
return ret;
}
 
/* For framebuffer backed by dmabuf, wait for fence */
 
if (!obj) {
ret = 0;
} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
INTEL_INFO(dev)->cursor_needs_physical) {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = 1;
ret = i915_gem_object_attach_phys(obj, align);
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
}
 
if (ret == 0)
if (ret == 0) {
if (obj) {
struct intel_plane_state *plane_state =
to_intel_plane_state(new_state);
 
i915_gem_request_assign(&plane_state->wait_req,
obj->last_write_req);
}
 
i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
}
 
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
13499,6 → 13858,8
* @fb: old framebuffer that was on plane
*
* Cleans up a framebuffer that has just been removed from a plane.
*
* Must be called with struct_mutex held.
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
13505,18 → 13866,28
const struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *old_intel_state;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
 
if (!obj)
old_intel_state = to_intel_plane_state(old_state);
 
if (!obj && !old_obj)
return;
 
if (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical) {
mutex_lock(&dev->struct_mutex);
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state);
mutex_unlock(&dev->struct_mutex);
 
/* prepare_fb aborted? */
if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
(obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
 
i915_gem_request_assign(&old_intel_state->wait_req, NULL);
 
}
}
 
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13534,7 → 13905,7
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
 
if (!crtc_clock || !cdclk)
if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
return DRM_PLANE_HELPER_NO_SCALING;
 
/*
13583,19 → 13954,9
struct drm_framebuffer *fb = state->base.fb;
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
struct drm_rect *src = &state->src;
 
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
 
plane->fb = fb;
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
 
if (!crtc->state->active)
return;
 
dev_priv->display.update_primary_plane(crtc, fb,
state->src.x1 >> 16,
state->src.y1 >> 16);
13620,11 → 13981,7
to_intel_crtc_state(old_crtc_state);
bool modeset = needs_modeset(crtc->state);
 
if (intel_crtc->atomic.update_wm_pre)
intel_update_watermarks(crtc);
 
/* Perform vblank evasion around commit operation */
if (crtc->state->active)
intel_pipe_update_start(intel_crtc);
 
if (modeset)
13641,7 → 13998,6
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (crtc->state->active)
intel_pipe_update_end(intel_crtc);
}
 
13719,7 → 14075,7
drm_universal_plane_init(dev, &primary->base, 0,
&intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
DRM_PLANE_TYPE_PRIMARY, NULL);
 
if (INTEL_INFO(dev)->gen >= 4)
intel_create_rotation_property(dev, primary);
13871,7 → 14227,7
&intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
DRM_PLANE_TYPE_CURSOR, NULL);
 
if (INTEL_INFO(dev)->gen >= 4) {
if (!dev->mode_config.rotation_property)
13948,7 → 14304,7
goto fail;
 
ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
cursor, &intel_crtc_funcs);
cursor, &intel_crtc_funcs, NULL);
if (ret)
goto fail;
 
14074,9 → 14430,16
if (IS_CHERRYVIEW(dev))
return false;
 
if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
 
/* DDI E can't be used if DDI A requires 4 lanes */
if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
 
if (!dev_priv->vbt.int_crt_support)
return false;
 
return true;
}
 
14110,7 → 14473,7
*/
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
if (found || IS_SKYLAKE(dev))
if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
intel_ddi_init(dev, PORT_A);
 
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
14126,7 → 14489,7
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
if (IS_SKYLAKE(dev) &&
if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14141,7 → 14504,7
 
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB, true);
found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
if (!found)
intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14159,9 → 14522,7
 
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
bool has_edp, has_port;
 
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
14170,37 → 14531,27
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
*
* Sadly the straps seem to be missing sometimes even for HDMI
* ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
* and VBT for the presence of the port. Additionally we can't
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
has_edp = intel_dp_is_edp(dev, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_B))
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_B))
intel_dp_init(dev, VLV_DP_B, PORT_B);
 
has_edp = intel_dp_is_edp(dev, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_C))
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_C))
intel_dp_init(dev, VLV_DP_C, PORT_C);
 
if (IS_CHERRYVIEW(dev)) {
/*
* eDP not supported on port D,
* so no need to worry about it
*/
has_port = intel_bios_is_port_present(dev_priv, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
/* eDP not supported on port D, so don't check VBT */
if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev, CHV_HDMID, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED)
intel_dp_init(dev, CHV_DP_D, PORT_D);
if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}
 
intel_dsi_init(dev);
14209,7 → 14560,7
 
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, GEN3_SDVOB, true);
found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
if (!found && IS_G4X(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14223,7 → 14574,7
 
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
found = intel_sdvo_init(dev, GEN3_SDVOC, false);
found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
}
 
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14242,6 → 14593,9
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
 
// if (SUPPORTS_TV(dev))
// intel_tv_init(dev);
 
intel_psr_init(dev);
 
for_each_intel_encoder(dev, encoder) {
14317,7 → 14671,7
* pixels and 32K bytes."
*/
return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
} else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
return 32*1024;
} else if (gen >= 4) {
if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14421,7 → 14775,8
}
break;
case DRM_FORMAT_ABGR8888:
if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
INTEL_INFO(dev)->gen < 9) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
14437,7 → 14792,7
}
break;
case DRM_FORMAT_ABGR2101010:
if (!IS_VALLEYVIEW(dev)) {
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
14486,8 → 14841,9
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd2 *user_mode_cmd)
const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
14496,7 → 14852,11
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
 
return intel_framebuffer_create(dev, &mode_cmd, obj);
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
drm_gem_object_unreference_unlocked(&obj->base);
 
return fb;
}
 
#ifndef CONFIG_DRM_FBDEV_EMULATION
14560,7 → 14920,7
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
14581,7 → 14941,7
}
 
/* Returns the core display clock speed */
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
else if (IS_BROXTON(dev))
14593,7 → 14953,7
else if (IS_HASWELL(dev))
dev_priv->display.get_display_clock_speed =
haswell_get_display_clock_speed;
else if (IS_VALLEYVIEW(dev))
else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
dev_priv->display.get_display_clock_speed =
valleyview_get_display_clock_speed;
else if (IS_GEN5(dev))
14621,9 → 14981,6
else if (IS_I945GM(dev) || IS_845G(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_PINEVIEW(dev))
dev_priv->display.get_display_clock_speed =
pnv_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
14654,7 → 15011,7
dev_priv->display.modeset_calc_cdclk =
broadwell_modeset_calc_cdclk;
}
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->display.modeset_commit_cdclk =
valleyview_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
14870,7 → 15227,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
u32 vga_reg = i915_vgacntrl_reg(dev);
i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
// vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
14982,9 → 15339,6
i915_disable_vga(dev);
intel_setup_outputs(dev);
 
/* Just in case the BIOS is doing something questionable. */
intel_fbc_disable(dev_priv);
 
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev);
drm_modeset_unlock_all(dev);
15071,10 → 15425,9
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
 
/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->config->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
/* restore vblank interrupts to correct state */
15141,6 → 15494,7
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
crtc->base.state->connector_mask = 0;
 
/* Because we only establish the connector -> encoder ->
* crtc links if something is active, this means the
15228,7 → 15582,7
void i915_redisable_vga_power_on(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15247,10 → 15601,12
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
 
i915_redisable_vga_power_on(dev);
 
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
 
static bool primary_get_hw_state(struct intel_plane *plane)
15267,7 → 15623,7
struct intel_plane_state *plane_state =
to_intel_plane_state(primary->state);
 
plane_state->visible =
plane_state->visible = crtc->active &&
primary_get_hw_state(to_intel_plane(primary));
 
if (plane_state->visible)
15343,7 → 15699,21
for_each_intel_connector(dev, connector) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
connector->base.encoder = &connector->encoder->base;
 
encoder = connector->encoder;
connector->base.encoder = &encoder->base;
 
if (encoder->base.crtc &&
encoder->base.crtc->state->active) {
/*
* This has to be done during hardware readout
* because anything calling .crtc_disable may
* rely on the connector_mask being accurate.
*/
encoder->base.crtc->state->connector_mask |=
1 << drm_connector_index(&connector->base);
}
 
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
15428,7 → 15798,7
pll->on = false;
}
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_wm_get_hw_state(dev);
else if (IS_GEN9(dev))
skl_wm_get_hw_state(dev);
15524,8 → 15894,7
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(c->primary,
c->primary->fb,
c->primary->state,
NULL, NULL);
c->primary->state);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
15553,7 → 15922,7
{
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_connector *connector;
 
intel_disable_gt_powersave(dev);
 
15580,13 → 15949,9
flush_scheduled_work();
 
/* destroy the backlight and sysfs files before encoders/connectors */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct intel_connector *intel_connector;
for_each_intel_connector(dev, connector)
connector->unregister(connector);
 
intel_connector = to_intel_connector(connector);
intel_connector->unregister(intel_connector);
}
 
drm_mode_config_cleanup(dev);
 
intel_cleanup_overlay(dev);