Subversion Repositories Kolibri OS

Compare Revisions

Rev 4103 → Rev 4104

/drivers/video/drm/i915/intel_display.c
41,29 → 41,22
#include <drm/drm_crtc_helper.h>
//#include <linux/dma_remapping.h>
 
#define MAX_ERRNO 4095
phys_addr_t get_bus_addr(void);
 
 
#define MAX_ERRNO 4095
 
 
 
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
typedef struct {
/* given values */
int n;
int m1, m2;
int p1, p2;
/* derived values */
int dot;
int vco;
int m;
int p;
} intel_clock_t;
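/*
 * Worked example (illustrative values only, using the i9xx formulas that
 * appear later in this file): with refclk = 96000 kHz and the given values
 * n = 3, m1 = 20, m2 = 10, p1 = 2, p2 = 5, the derived fields come out as
 *
 *   m   = 5 * (m1 + 2) + (m2 + 2)  = 122
 *   p   = p1 * p2                  = 10
 *   vco = refclk * m / (n + 2)     = 96000 * 122 / 5 = 2342400 kHz
 *   dot = vco / p                  = 234240 kHz
 *
 * (Pineview derives m differently; see pineview_clock()/pnv_find_best_dpll.)
 */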
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
 
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
 
 
typedef struct {
int min, max;
} intel_range_t;
73,29 → 66,10
int p2_slow, p2_fast;
} intel_p2_t;
 
#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
/**
* find_pll() - Find the best values for the PLL
* @limit: limits for the PLL
* @crtc: current CRTC
* @target: target frequency in kHz
* @refclk: reference clock frequency in kHz
* @match_clock: if provided, @best_clock P divider must
* match the P divider from @match_clock
* used for LVDS downclocking
* @best_clock: best PLL values found
*
* Returns true on success, false on failure.
*/
bool (*find_pll)(const intel_limit_t *limit,
struct drm_crtc *crtc,
int target, int refclk,
intel_clock_t *match_clock,
intel_clock_t *best_clock);
};
 
/* FDI */
111,29 → 85,6
return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
 
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
 
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
 
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
 
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
144,7 → 95,7
return 27;
}
 
static const intel_limit_t intel_limits_i8xx_dvo = {
static const intel_limit_t intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
155,9 → 106,21
.p1 = { .min = 2, .max = 33 },
.p2 = { .dot_limit = 165000,
.p2_slow = 4, .p2_fast = 2 },
.find_pll = intel_find_best_PLL,
};
 
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
.p = { .min = 4, .max = 128 },
.p1 = { .min = 2, .max = 33 },
.p2 = { .dot_limit = 165000,
.p2_slow = 4, .p2_fast = 4 },
};
 
static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
169,7 → 132,6
.p1 = { .min = 1, .max = 6 },
.p2 = { .dot_limit = 165000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
};
 
static const intel_limit_t intel_limits_i9xx_sdvo = {
183,7 → 145,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
 
static const intel_limit_t intel_limits_i9xx_lvds = {
197,7 → 158,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
};
 
 
214,7 → 174,6
.p2_slow = 10,
.p2_fast = 10
},
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_g4x_hdmi = {
228,7 → 187,6
.p1 = { .min = 1, .max = 8},
.p2 = { .dot_limit = 165000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
243,7 → 201,6
.p2 = { .dot_limit = 0,
.p2_slow = 14, .p2_fast = 14
},
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
258,23 → 215,8
.p2 = { .dot_limit = 0,
.p2_slow = 7, .p2_fast = 7
},
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_g4x_display_port = {
.dot = { .min = 161670, .max = 227000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 2 },
.m = { .min = 97, .max = 108 },
.m1 = { .min = 0x10, .max = 0x12 },
.m2 = { .min = 0x05, .max = 0x06 },
.p = { .min = 10, .max = 20 },
.p1 = { .min = 1, .max = 2},
.p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
.find_pll = intel_find_pll_g4x_dp,
};
 
static const intel_limit_t intel_limits_pineview_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
288,7 → 230,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
 
static const intel_limit_t intel_limits_pineview_lvds = {
302,7 → 243,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_find_best_PLL,
};
 
/* Ironlake / Sandybridge
321,7 → 261,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_ironlake_single_lvds = {
335,7 → 274,6
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
349,7 → 287,6
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
 
/* LVDS 100mhz refclk limits. */
364,7 → 301,6
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
378,23 → 314,8
.p1 = { .min = 2, .max = 6 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_ironlake_display_port = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000},
.n = { .min = 1, .max = 2 },
.m = { .min = 81, .max = 90 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 10, .max = 20 },
.p1 = { .min = 1, .max = 2},
.p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
.find_pll = intel_find_pll_ironlake_dp,
};
 
static const intel_limit_t intel_limits_vlv_dac = {
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 4000000, .max = 6000000 },
403,15 → 324,14
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
.p1 = { .min = 1, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.find_pll = intel_vlv_find_best_pll,
};
 
static const intel_limit_t intel_limits_vlv_hdmi = {
.dot = { .min = 20000, .max = 165000 },
.vco = { .min = 4000000, .max = 5994000},
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
420,7 → 340,6
.p1 = { .min = 2, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.find_pll = intel_vlv_find_best_pll,
};
 
static const intel_limit_t intel_limits_vlv_dp = {
431,61 → 350,11
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
.p1 = { .min = 1, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.find_pll = intel_vlv_find_best_pll,
};
 
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
return 0;
}
 
I915_WRITE(DPIO_REG, reg);
I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO read wait timed out\n");
return 0;
}
 
return I915_READ(DPIO_DATA);
}
 
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
return;
}
 
I915_WRITE(DPIO_DATA, val);
I915_WRITE(DPIO_REG, reg);
I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
DRM_ERROR("DPIO write wait timed out\n");
}
 
static void vlv_init_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Reset the DPIO config */
I915_WRITE(DPIO_CTL, 0);
POSTING_READ(DPIO_CTL);
I915_WRITE(DPIO_CTL, 1);
POSTING_READ(DPIO_CTL);
}
 
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
504,10 → 373,7
else
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
limit = &intel_limits_ironlake_display_port;
else
} else
limit = &intel_limits_ironlake_dac;
 
return limit;
528,8 → 394,6
limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
 
565,8 → 429,10
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dvo;
limit = &intel_limits_i8xx_dac;
}
return limit;
}
580,13 → 446,14
clock->dot = clock->vco / clock->p;
}
 
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
if (IS_PINEVIEW(dev)) {
pineview_clock(refclk, clock);
return;
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
 
static void i9xx_clock(int refclk, intel_clock_t *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / (clock->n + 2);
clock->dot = clock->vco / clock->p;
643,10 → 510,9
}
 
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
 
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
675,8 → 541,7
clock.m1++) {
for (clock.m2 = limit->m2.min;
clock.m2 <= limit->m2.max; clock.m2++) {
/* m1 is always 0 in Pineview */
if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
if (clock.m2 >= clock.m1)
break;
for (clock.n = limit->n.min;
clock.n <= limit->n.max; clock.n++) {
684,7 → 549,7
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
 
intel_clock(dev, refclk, &clock);
i9xx_clock(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
706,12 → 571,71
}
 
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
int err = target;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
 
memset(best_clock, 0, sizeof(*best_clock));
 
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
clock.m2 <= limit->m2.max; clock.m2++) {
for (clock.n = limit->n.min;
clock.n <= limit->n.max; clock.n++) {
for (clock.p1 = limit->p1.min;
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
 
pineview_clock(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
if (match_clock &&
clock.p != match_clock->p)
continue;
 
this_err = abs(clock.dot - target);
if (this_err < err) {
*best_clock = clock;
err = this_err;
}
}
}
}
}
 
return (err != target);
}
 
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
int max_n;
bool found;
/* approximately equals target * 0.00585 */
719,12 → 643,6
found = false;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
int lvds_reg;
 
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
749,13 → 667,10
clock.p1 >= limit->p1.min; clock.p1--) {
int this_err;
 
intel_clock(dev, refclk, &clock);
i9xx_clock(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
if (match_clock &&
clock.p != match_clock->p)
continue;
 
this_err = abs(clock.dot - target);
if (this_err < err_most) {
772,66 → 687,13
}
 
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
 
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
clock.p2 = 10;
clock.m1 = 12;
clock.m2 = 9;
} else {
clock.n = 2;
clock.p1 = 1;
clock.p2 = 10;
clock.m1 = 14;
clock.m2 = 8;
}
intel_clock(dev, refclk, &clock);
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
 
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
intel_clock_t clock;
if (target < 200000) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 2;
clock.m1 = 23;
clock.m2 = 8;
} else {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 1;
clock.m1 = 14;
clock.m2 = 2;
}
clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
clock.p = (clock.p1 * clock.p2);
clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
clock.vco = 0;
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
u32 m, n, fastclk;
u32 updrate, minupdate, fracbits, p;
u32 updrate, minupdate, p;
unsigned long bestppm, ppm, absppm;
int dotclk, flag;
 
842,7 → 704,6
fastclk = dotclk / (2*100);
updrate = 0;
minupdate = 19200;
fracbits = 1;
n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
bestm1 = bestm2 = bestp1 = bestp2 = 0;
 
1056,7 → 917,7
}
 
/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
1070,17 → 931,25
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
 
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (crtc->config.shared_dpll < 0)
return NULL;
 
return &dev_priv->shared_dplls[crtc->config.shared_dpll];
}
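/*
 * Sketch of typical use (a hypothetical caller, matching the pattern used
 * further down in this change): the helper returns NULL for crtcs without
 * a shared DPLL, so callers are expected to check before dereferencing.
 *
 *   struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 *   if (pll)
 *           assert_shared_dpll_enabled(dev_priv, pll);
 */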
 
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
struct intel_pch_pll *pll,
struct intel_crtc *crtc,
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state)
{
u32 val;
bool cur_state;
struct intel_dpll_hw_state hw_state;
 
if (HAS_PCH_LPT(dev_priv->dev)) {
DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1088,36 → 957,14
}
 
if (WARN (!pll,
"asserting PCH PLL %s with no PLL\n", state_string(state)))
"asserting DPLL %s with no DPLL\n", state_string(state)))
return;
 
val = I915_READ(pll->pll_reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
WARN(cur_state != state,
"PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
pll->pll_reg, state_string(state), state_string(cur_state), val);
 
/* Make sure the selected PLL is correctly attached to the transcoder */
if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
 
pch_dpll = I915_READ(PCH_DPLL_SEL);
cur_state = pll->pll_reg == _PCH_DPLL_B;
if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
"PLL[%d] not attached to this transcoder %d: %08x\n",
cur_state, crtc->pipe, pch_dpll)) {
cur_state = !!(val >> (4*crtc->pipe + 3));
WARN(cur_state != state,
"PLL[%d] not %s on this transcoder %d: %08x\n",
pll->pll_reg == _PCH_DPLL_B,
state_string(state),
crtc->pipe,
val);
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
}
}
}
#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
1181,15 → 1028,19
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
 
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
 
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
cur_state = !!(val & FDI_RX_PLL_ENABLE);
WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
 
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1234,8 → 1085,8
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
 
if (!intel_using_power_well(dev_priv->dev) &&
cpu_transcoder != TRANSCODER_EDP) {
if (!intel_display_power_enabled(dev_priv->dev,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
reg = PIPECONF(cpu_transcoder);
1269,12 → 1120,13
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int reg, i;
u32 val;
int cur_pipe;
 
/* Planes are fixed to pipes on ILK+ */
if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) {
/* Primary planes are fixed to pipes on gen4+ */
if (INTEL_INFO(dev)->gen >= 4) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
WARN((val & DISPLAY_PLANE_ENABLE),
1284,7 → 1136,7
}
 
/* Need to check both planes against the pipe */
for (i = 0; i < 2; i++) {
for_each_pipe(i) {
reg = DSPCNTR(i);
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1298,21 → 1150,32
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int reg, i;
u32 val;
 
if (!IS_VALLEYVIEW(dev_priv->dev))
return;
 
/* Need to check both planes against the pipe */
if (IS_VALLEYVIEW(dev)) {
for (i = 0; i < dev_priv->num_plane; i++) {
reg = SPCNTR(pipe, i);
val = I915_READ(reg);
WARN((val & SP_ENABLE),
"sprite %d assertion failure, should be off on pipe %c but is still active\n",
pipe * 2 + i, pipe_name(pipe));
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, i), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
val = I915_READ(reg);
WARN((val & SPRITE_ENABLE),
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
} else if (INTEL_INFO(dev)->gen >= 5) {
reg = DVSCNTR(pipe);
val = I915_READ(reg);
WARN((val & DVS_ENABLE),
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
}
}
 
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
1330,7 → 1193,7
WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
 
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
1337,7 → 1200,7
u32 val;
bool enabled;
 
reg = TRANSCONF(pipe);
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
enabled = !!(val & TRANS_ENABLE);
WARN(enabled,
1463,49 → 1326,92
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
 
/**
* intel_enable_pll - enable a PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
* make sure the PLL reg is writable first though, since the panel write
* protect mechanism may be enabled.
*
* Note! This is for pre-ILK only.
*
* Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
*/
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
static void vlv_enable_pll(struct intel_crtc *crtc)
{
int reg;
u32 val;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
u32 dpll = crtc->config.dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
/* No really, not for ILK+ */
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
assert_panel_unlocked(dev_priv, pipe);
assert_panel_unlocked(dev_priv, crtc->pipe);
 
reg = DPLL(pipe);
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150);
 
if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
 
I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(crtc->pipe));
 
/* We do this three times for luck */
I915_WRITE(reg, val);
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
 
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
u32 dpll = crtc->config.dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
/* No really, not for ILK+ */
BUG_ON(dev_priv->info->gen >= 5);
 
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev) && !IS_I830(dev))
assert_panel_unlocked(dev_priv, crtc->pipe);
 
I915_WRITE(reg, dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(reg);
udelay(150);
 
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
crtc->config.dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(reg, dpll);
}
 
/* We do this three times for luck */
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
 
/**
* intel_disable_pll - disable a PLL
* i9xx_disable_pll - disable a PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to disable
*
1513,11 → 1419,8
*
* Note! This is for pre-ILK only.
*/
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
int reg;
u32 val;
 
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
1525,76 → 1428,26
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
 
reg = DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
I915_WRITE(DPLL(pipe), 0);
POSTING_READ(DPLL(pipe));
}
 
/* SBI access */
static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
{
u32 tmp;
u32 port_mask;
 
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return;
}
 
I915_WRITE(SBI_ADDR, (reg << 16));
I915_WRITE(SBI_DATA, value);
 
if (destination == SBI_ICLK)
tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
if (!port)
port_mask = DPLL_PORTB_READY_MASK;
else
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
port_mask = DPLL_PORTC_READY_MASK;
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
return;
if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
'B' + port, I915_READ(DPLL(0)));
}
}
 
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return 0;
}
 
I915_WRITE(SBI_ADDR, (reg << 16));
 
if (destination == SBI_ICLK)
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
else
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
return 0;
}
 
return I915_READ(SBI_DATA);
}
 
/**
* ironlake_enable_pch_pll - enable PCH PLL
* ironlake_enable_shared_dpll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
1601,87 → 1454,64
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
int reg;
u32 val;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
/* PCH PLLs only available on ILK, SNB and IVB */
BUG_ON(dev_priv->info->gen < 5);
pll = intel_crtc->pch_pll;
if (pll == NULL)
if (WARN_ON(pll == NULL))
return;
 
if (WARN_ON(pll->refcount == 0))
return;
 
DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
pll->pll_reg, pll->active, pll->on,
intel_crtc->base.base.id);
DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
pll->name, pll->active, pll->on,
crtc->base.base.id);
 
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
 
if (pll->active++ && pll->on) {
assert_pch_pll_enabled(dev_priv, pll, NULL);
if (pll->active++) {
WARN_ON(!pll->on);
assert_shared_dpll_enabled(dev_priv, pll);
return;
}
WARN_ON(pll->on);
 
DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
 
reg = pll->pll_reg;
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
 
DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->enable(dev_priv, pll);
pll->on = true;
}
 
static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll = intel_crtc->pch_pll;
int reg;
u32 val;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
if (pll == NULL)
if (WARN_ON(pll == NULL))
return;
 
if (WARN_ON(pll->refcount == 0))
return;
 
DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
pll->pll_reg, pll->active, pll->on,
intel_crtc->base.base.id);
DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
pll->name, pll->active, pll->on,
crtc->base.base.id);
 
if (WARN_ON(pll->active == 0)) {
assert_pch_pll_disabled(dev_priv, pll, NULL);
assert_shared_dpll_disabled(dev_priv, pll);
return;
}
 
if (--pll->active) {
assert_pch_pll_enabled(dev_priv, pll, NULL);
assert_shared_dpll_enabled(dev_priv, pll);
WARN_ON(!pll->on);
if (--pll->active)
return;
}
 
DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
 
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
 
reg = pll->pll_reg;
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
 
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->disable(dev_priv, pll);
pll->on = false;
}
 
1690,6 → 1520,7
{
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t reg, val, pipeconf_val;
 
/* PCH only available on ILK+ */
1696,9 → 1527,8
BUG_ON(dev_priv->info->gen < 5);
 
/* Make sure PCH DPLL is enabled */
assert_pch_pll_enabled(dev_priv,
to_intel_crtc(crtc)->pch_pll,
to_intel_crtc(crtc));
assert_shared_dpll_enabled(dev_priv,
intel_crtc_to_shared_dpll(intel_crtc));
 
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
1713,7 → 1543,7
I915_WRITE(reg, val);
}
 
reg = TRANSCONF(pipe);
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
 
1738,7 → 1568,7
 
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
 
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1767,8 → 1597,8
else
val |= TRANS_PROGRESSIVE;
 
I915_WRITE(TRANSCONF(TRANSCODER_A), val);
if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
I915_WRITE(LPT_TRANSCONF, val);
if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("Failed to enable PCH transcoder\n");
}
 
1785,13 → 1615,13
/* Ports must be off as well */
assert_pch_ports_disabled(dev_priv, pipe);
 
reg = TRANSCONF(pipe);
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %d\n", pipe);
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
 
if (!HAS_PCH_IBX(dev)) {
/* Workaround: Clear the timing override chicken bit again. */
1806,11 → 1636,11
{
u32 val;
 
val = I915_READ(_TRANSACONF);
val = I915_READ(LPT_TRANSCONF);
val &= ~TRANS_ENABLE;
I915_WRITE(_TRANSACONF, val);
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("Failed to disable PCH transcoder\n");
 
/* Workaround: clear timing override bit. */
1842,6 → 1672,9
int reg;
u32 val;
 
assert_planes_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
 
if (HAS_PCH_LPT(dev_priv->dev))
pch_transcoder = TRANSCODER_A;
else
2046,7 → 1879,7
return 0;
 
err_unpin:
i915_gem_object_unpin(obj);
i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
dev_priv->mm.interruptible = true;
return ret;
2103,7 → 1936,7
case 1:
break;
default:
DRM_ERROR("Can't update plane %d in SAREA\n", plane);
DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
return -EINVAL;
}
 
2152,6 → 1985,9
dspcntr &= ~DISPPLANE_TILED;
}
 
if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
I915_WRITE(reg, dspcntr);
 
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2166,16 → 2002,17
intel_crtc->dspaddr_offset = linear_offset;
}
 
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
 
return 0;
2200,7 → 2037,7
case 2:
break;
default:
DRM_ERROR("Can't update plane %d in SAREA\n", plane);
DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
return -EINVAL;
}
 
2243,7 → 2080,9
else
dspcntr &= ~DISPPLANE_TILED;
 
/* must disable */
if (IS_HASWELL(dev))
dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
else
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
I915_WRITE(reg, dspcntr);
2255,11 → 2094,12
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
 
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
2287,6 → 2127,44
}
 
#if 0
void intel_display_handle_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
/*
* Flips in the rings have been nuked by the reset,
* so complete all pending flips so that user space
* will get its events and not get stuck.
*
* Also update the base address of all primary
* planes to the last fb to make sure we're
* showing the correct fb after a reset.
*
* Need to make two loops over the crtcs so that we
* don't try to grab a crtc mutex before the
* pending_flip_queue really got woken up.
*/
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
 
intel_prepare_page_flip(dev, plane);
intel_finish_page_flip_plane(dev, plane);
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
mutex_lock(&crtc->mutex);
if (intel_crtc->active)
dev_priv->display.update_plane(crtc, crtc->fb,
crtc->x, crtc->y);
mutex_unlock(&crtc->mutex);
}
}
 
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
2309,6 → 2187,33
 
return ret;
}
 
static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (!dev->primary->master)
return;
 
master_priv = dev->primary->master->driver_priv;
if (!master_priv->sarea_priv)
return;
 
switch (intel_crtc->pipe) {
case 0:
master_priv->sarea_priv->pipeA_x = x;
master_priv->sarea_priv->pipeA_y = y;
break;
case 1:
master_priv->sarea_priv->pipeB_x = x;
master_priv->sarea_priv->pipeB_y = y;
break;
default:
break;
}
}
#endif
 
static int
2328,8 → 2233,8
}
 
if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
intel_crtc->plane,
DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
plane_name(intel_crtc->plane),
INTEL_INFO(dev)->num_pipes);
return -EINVAL;
}
2359,11 → 2264,13
crtc->y = y;
 
if (old_fb) {
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
 
intel_update_fbc(dev);
intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
 
return 0;
2410,6 → 2317,11
FDI_FE_ERRC_ENABLE);
}
 
static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
{
return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
}
 
static void ivb_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2419,10 → 2331,13
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
uint32_t temp;
 
/* When everything is off disable fdi C so that we could enable fdi B
* with all lanes. XXX: This misses the case where a pipe is not using
* any pch resources and so doesn't need any fdi lanes. */
if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
/*
* When everything is off disable fdi C so that we could enable fdi B
* with all lanes. Note that we don't care about enabled pipes without
* an enabled pch encoder.
*/
if (!pipe_has_enabled_pch(pipe_B_crtc) &&
!pipe_has_enabled_pch(pipe_C_crtc)) {
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
 
2460,8 → 2375,8
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
2558,8 → 2473,8
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2674,7 → 2589,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i;
u32 reg, temp, i, j;
 
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
2690,15 → 2605,30
DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
I915_READ(FDI_RX_IIR(pipe)));
 
/* Try each vswing and preemphasis setting twice before moving on */
for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
/* disable first in case we need to retry */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp &= ~FDI_TX_ENABLE;
I915_WRITE(reg, temp);
 
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp &= ~FDI_RX_ENABLE;
I915_WRITE(reg, temp);
 
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
temp |= snb_b_fdi_train_param[j/2];
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
2707,25 → 2637,14
 
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_RX_ENABLE);
 
POSTING_READ(reg);
udelay(150);
udelay(1); /* should be 0.5us */
 
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
I915_WRITE(reg, temp);
 
POSTING_READ(reg);
udelay(500);
 
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2733,12 → 2652,16
if (temp & FDI_RX_BIT_LOCK ||
(I915_READ(reg) & FDI_RX_BIT_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
i);
break;
}
udelay(1); /* should be 0.5us */
}
if (i == 4)
DRM_ERROR("FDI train 1 fail!\n");
if (i == 4) {
DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
continue;
}
 
/* Train 2 */
reg = FDI_TX_CTL(pipe);
2745,8 → 2668,6
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp);
 
reg = FDI_RX_CTL(pipe);
2756,31 → 2677,27
I915_WRITE(reg, temp);
 
POSTING_READ(reg);
udelay(150);
udelay(2); /* should be 1.5us */
 
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
I915_WRITE(reg, temp);
 
POSTING_READ(reg);
udelay(500);
 
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
if (temp & FDI_RX_SYMBOL_LOCK) {
if (temp & FDI_RX_SYMBOL_LOCK ||
(I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
break;
DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
i);
goto train_done;
}
udelay(2); /* should be 1.5us */
}
if (i == 4)
DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
}
 
train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
 
2795,8 → 2712,8
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~((0x7 << 19) | (0x7 << 16));
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
 
2942,8 → 2859,6
}
#endif
 
 
 
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
3032,6 → 2947,30
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
enum pipe pch_transcoder)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
 
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
I915_READ(HTOTAL(cpu_transcoder)));
I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
I915_READ(HBLANK(cpu_transcoder)));
I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
I915_READ(HSYNC(cpu_transcoder)));
 
I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
I915_READ(VTOTAL(cpu_transcoder)));
I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
I915_READ(VBLANK(cpu_transcoder)));
I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
I915_READ(VSYNC(cpu_transcoder)));
I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
 
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
3048,7 → 2987,7
int pipe = intel_crtc->pipe;
u32 reg, temp;
 
assert_transcoder_disabled(dev_priv, pipe);
assert_pch_transcoder_disabled(dev_priv, pipe);
 
/* Write the TU size bits before fdi link training, so that error
* detection works. */
3058,35 → 2997,15
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
 
/* XXX: pch pll's can be enabled any time before we enable the PCH
* transcoder, and we actually should do this to not upset any PCH
* transcoder that already use the clock when we share it.
*
* Note that enable_pch_pll tries to do the right thing, but get_pch_pll
* unconditionally resets the pll - we need that to have the right LVDS
* enable sequence. */
ironlake_enable_pch_pll(intel_crtc);
 
/* We need to program the right clock selection before writing the pixel
* multiplier into the DPLL. */
if (HAS_PCH_CPT(dev)) {
u32 sel;
 
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
default:
case 0:
temp |= TRANSA_DPLL_ENABLE;
sel = TRANSA_DPLLB_SEL;
break;
case 1:
temp |= TRANSB_DPLL_ENABLE;
sel = TRANSB_DPLLB_SEL;
break;
case 2:
temp |= TRANSC_DPLL_ENABLE;
sel = TRANSC_DPLLB_SEL;
break;
}
if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
temp |= sel;
else
temp &= ~sel;
3093,17 → 3012,19
I915_WRITE(PCH_DPLL_SEL, temp);
}
 
/* XXX: pch pll's can be enabled any time before we enable the PCH
* transcoder, and we actually should do this to not upset any PCH
* transcoder that already use the clock when we share it.
*
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
ironlake_enable_shared_dpll(intel_crtc);
 
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
 
I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
 
intel_fdi_normal_train(crtc);
 
/* For PCH DP, enable TRANS_DP_CTL */
3152,75 → 3073,71
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
assert_transcoder_disabled(dev_priv, TRANSCODER_A);
assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
 
lpt_program_iclkip(crtc);
 
/* Set transcoder timing. */
I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
 
I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
 
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
 
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
static void intel_put_shared_dpll(struct intel_crtc *crtc)
{
struct intel_pch_pll *pll = intel_crtc->pch_pll;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
if (pll == NULL)
return;
 
if (pll->refcount == 0) {
WARN(1, "bad PCH PLL refcount\n");
WARN(1, "bad %s refcount\n", pll->name);
return;
}
 
--pll->refcount;
intel_crtc->pch_pll = NULL;
if (--pll->refcount == 0) {
WARN_ON(pll->on);
WARN_ON(pll->active);
}
 
static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
 
static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
int i;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
enum intel_dpll_id i;
 
pll = intel_crtc->pch_pll;
if (pll) {
DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
intel_crtc->base.base.id, pll->pll_reg);
goto prepare;
DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
crtc->base.base.id, pll->name);
intel_put_shared_dpll(crtc);
}
 
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = intel_crtc->pipe;
pll = &dev_priv->pch_plls[i];
i = (enum intel_dpll_id) crtc->pipe;
pll = &dev_priv->shared_dplls[i];
 
DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
intel_crtc->base.base.id, pll->pll_reg);
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
 
goto found;
}
 
for (i = 0; i < dev_priv->num_pch_pll; i++) {
pll = &dev_priv->pch_plls[i];
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
 
/* Only want to check enabled timings first */
if (pll->refcount == 0)
continue;
 
if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
fp == I915_READ(pll->fp0_reg)) {
DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
intel_crtc->base.base.id,
pll->pll_reg, pll->refcount, pll->active);
if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
sizeof(pll->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
crtc->base.base.id,
pll->name, pll->refcount, pll->active);
 
goto found;
}
3227,11 → 3144,11
}
 
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_pch_pll; i++) {
pll = &dev_priv->pch_plls[i];
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
if (pll->refcount == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
intel_crtc->base.base.id, pll->pll_reg);
DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
crtc->base.base.id, pll->name);
goto found;
}
}
3239,24 → 3156,26
return NULL;
 
found:
intel_crtc->pch_pll = pll;
crtc->config.shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
 
if (pll->active == 0) {
memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
sizeof(pll->hw_state));
 
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
 
pll->mode_set(dev_priv, pll);
}
pll->refcount++;
DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
prepare: /* separate function? */
DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
 
/* Wait for the clocks to stabilize before rewriting the regs */
I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(pll->pll_reg);
udelay(150);
 
I915_WRITE(pll->fp0_reg, fp);
I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
pll->on = false;
return pll;
}
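/*
 * Illustrative pairing of the shared-DPLL helpers in this change (the exact
 * call sites sit outside the hunks shown here): refcount counts crtcs that
 * reserved the PLL during a mode set, active counts crtcs that currently
 * have it running, and the hardware is only touched on the 0 <-> 1
 * transitions of active.
 *
 *   intel_get_shared_dpll(crtc);          // refcount++
 *   ironlake_enable_shared_dpll(crtc);    // active++, pll->enable() on 0 -> 1
 *   ...
 *   intel_disable_shared_dpll(crtc);      // active--, pll->disable() on 1 -> 0
 *   intel_put_shared_dpll(crtc);          // refcount--
 */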
 
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dslreg = PIPEDSL(pipe);
3266,10 → 3185,53
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
}
}
 
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
if (crtc->config.pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
}
}
 
static void intel_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct intel_plane *intel_plane;
 
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
if (intel_plane->pipe == pipe)
intel_plane_restore(&intel_plane->base);
}
 
static void intel_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct intel_plane *intel_plane;
 
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
if (intel_plane->pipe == pipe)
intel_plane_disable(&intel_plane->base);
}
 
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3278,7 → 3240,6
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 temp;
 
WARN_ON(!crtc->enabled);
 
3286,15 → 3247,16
return;
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
intel_update_watermarks(dev);
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(PCH_LVDS);
if ((temp & LVDS_PORT_EN) == 0)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
 
if (intel_crtc->config.has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
3305,27 → 3267,8
assert_fdi_rx_disabled(dev_priv, pipe);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
ironlake_pfit_enable(intel_crtc);
 
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
 
/*
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
3335,6 → 3278,8
intel_enable_pipe(dev_priv, pipe,
intel_crtc->config.has_pch_encoder);
intel_enable_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
// intel_crtc_update_cursor(crtc, true);
 
if (intel_crtc->config.has_pch_encoder)
ironlake_pch_enable(crtc);
3343,13 → 3288,11
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
// intel_crtc_update_cursor(crtc, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
if (HAS_PCH_CPT(dev))
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
cpt_verify_modeset(dev, intel_crtc->pipe);
 
/*
* There seems to be a race in PCH platform hw (at least on some
3362,6 → 3305,42
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
 
static void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!crtc->config.ips_enabled)
return;
 
/* We can only enable IPS after we enable a plane and wait for a vblank.
* We guarantee that the plane is enabled by calling intel_enable_ips
* only after intel_enable_plane. And intel_enable_plane already waits
* for a vblank, so all we need to do here is to enable the IPS bit. */
assert_plane_enabled(dev_priv, crtc->plane);
I915_WRITE(IPS_CTL, IPS_ENABLE);
}
 
static void hsw_disable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!crtc->config.ips_enabled)
return;
 
assert_plane_enabled(dev_priv, crtc->plane);
I915_WRITE(IPS_CTL, 0);
 
/* We need to wait for a vblank before we can disable the plane. */
intel_wait_for_vblank(dev, crtc->pipe);
}
 
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3377,6 → 3356,11
return;
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 
intel_update_watermarks(dev);
 
if (intel_crtc->config.has_pch_encoder)
3388,18 → 3372,7
 
intel_ddi_enable_pipe_clock(intel_crtc);
 
/* Enable panel fitting for eDP */
if (dev_priv->pch_pf_size &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
ironlake_pfit_enable(intel_crtc);
 
/*
* On ILK+ LUT must be loaded before the pipe is running but with
3413,7 → 3386,11
intel_enable_pipe(dev_priv, pipe,
intel_crtc->config.has_pch_encoder);
intel_enable_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
// intel_crtc_update_cursor(crtc, true);
 
hsw_enable_ips(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
 
3421,8 → 3398,6
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
// intel_crtc_update_cursor(crtc, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
3437,6 → 3412,21
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
/* To avoid upsetting the power well on haswell, only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
}
}
 
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3456,58 → 3446,51
 
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
 
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
// intel_crtc_update_cursor(crtc, false);
 
intel_disable_planes(crtc);
intel_disable_plane(dev_priv, plane, pipe);
 
if (dev_priv->cfb_plane == plane)
intel_disable_fbc(dev);
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
 
intel_disable_pipe(dev_priv, pipe);
 
/* Disable PF */
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
ironlake_pfit_disable(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (intel_crtc->config.has_pch_encoder) {
ironlake_fdi_disable(crtc);
 
ironlake_disable_pch_transcoder(dev_priv, pipe);
intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
temp &= ~(TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_PORT_SEL_MASK);
temp |= TRANS_DP_PORT_SEL_NONE;
I915_WRITE(reg, temp);
 
/* disable DPLL_SEL */
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
case 0:
temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
break;
case 1:
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
break;
case 2:
/* C shares PLL A or B */
temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
break;
default:
BUG(); /* wtf */
}
temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
I915_WRITE(PCH_DPLL_SEL, temp);
}
 
/* disable PCH DPLL */
intel_disable_pch_pll(intel_crtc);
intel_disable_shared_dpll(intel_crtc);
 
ironlake_fdi_pll_disable(intel_crtc);
}
 
intel_crtc->active = false;
intel_update_watermarks(dev);
3534,22 → 3517,23
encoder->disable(encoder);
 
 
/* FBC must be disabled before disabling the plane on HSW. */
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
hsw_disable_ips(intel_crtc);
 
// intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_plane(dev_priv, plane, pipe);
 
if (dev_priv->cfb_plane == plane)
intel_disable_fbc(dev);
 
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
intel_disable_pipe(dev_priv, pipe);
 
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
/* XXX: Once we have proper panel fitter state tracking implemented with
* hardware state read/check support we should switch to only disable
* the panel fitter when we know it's used. */
if (intel_using_power_well(dev)) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
}
ironlake_pfit_disable(intel_crtc);
 
intel_ddi_disable_pipe_clock(intel_crtc);
 
3559,6 → 3543,7
 
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_ddi_fdi_disable(crtc);
}
 
3573,17 → 3558,11
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_put_pch_pll(intel_crtc);
intel_put_shared_dpll(intel_crtc);
}
 
static void haswell_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
* start using it. */
intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
 
intel_ddi_put_crtc_pll(crtc);
}
 
3629,8 → 3608,32
}
}
 
static void i9xx_crtc_enable(struct drm_crtc *crtc)
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_config *pipe_config = &crtc->config;
 
if (!crtc->config.gmch_pfit.control)
return;
 
/*
* The panel fitter should only be adjusted whilst the pipe is disabled,
* according to register description and PRM.
*/
WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
assert_pipe_disabled(dev_priv, crtc->pipe);
 
I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
 
/* Border color in case we don't scale up to the full screen. Black by
* default, change to something else for debugging. */
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
 
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3646,24 → 3649,71
intel_crtc->active = true;
intel_update_watermarks(dev);
 
intel_enable_pll(dev_priv, pipe);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
vlv_enable_pll(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
i9xx_pfit_enable(intel_crtc);
 
intel_crtc_load_lut(crtc);
 
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
// intel_crtc_update_cursor(crtc, true);
 
intel_update_fbc(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
}
 
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
WARN_ON(!crtc->enabled);
 
if (intel_crtc->active)
return;
 
intel_crtc->active = true;
intel_update_watermarks(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
i9xx_enable_pll(intel_crtc);
 
i9xx_pfit_enable(intel_crtc);
 
intel_crtc_load_lut(crtc);
 
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
/* The fixup needs to happen before cursor is enabled */
if (IS_G4X(dev))
g4x_fixup_plane(dev_priv, pipe);
// intel_crtc_update_cursor(crtc, true);
 
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
 
/* Give the overlay scaler a chance to enable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, true);
// intel_crtc_update_cursor(crtc, true);
 
intel_update_fbc(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
}
3672,21 → 3722,16
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
uint32_t pctl = I915_READ(PFIT_CONTROL);
 
if (!crtc->config.gmch_pfit.control)
return;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
if (INTEL_INFO(dev)->gen >= 4)
pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT;
else
pipe = PIPE_B;
 
if (pipe == crtc->pipe) {
DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl);
DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
I915_READ(PFIT_CONTROL));
I915_WRITE(PFIT_CONTROL, 0);
}
}
 
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
3706,19 → 3751,25
/* Give the overlay scaler a chance to disable if it's on this pipe */
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
intel_crtc_dpms_overlay(intel_crtc, false);
// intel_crtc_update_cursor(crtc, false);
 
if (dev_priv->cfb_plane == plane)
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
intel_crtc_dpms_overlay(intel_crtc, false);
// intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_plane(dev_priv, plane, pipe);
 
intel_disable_pipe(dev_priv, pipe);
 
i9xx_pfit_disable(intel_crtc);
 
intel_disable_pll(dev_priv, pipe);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
i9xx_disable_pll(dev_priv, pipe);
 
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
3793,8 → 3844,8
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
 
dev_priv->display.crtc_disable(crtc);
intel_crtc->eld_vld = false;
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
 
3821,16 → 3872,6
}
}
 
void intel_modeset_disable(struct drm_device *dev)
{
struct drm_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->enabled)
intel_crtc_disable(crtc);
}
}
 
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3839,10 → 3880,10
kfree(intel_encoder);
}
 
/* Simple dpms helper for encodres with just one connector, no cloning and only
/* Simple dpms helper for encoders with just one connector, no cloning and only
* one kind of off state. It clamps all !ON modes to fully OFF and changes the
* state of the entire output pipe. */
void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
if (mode == DRM_MODE_DPMS_ON) {
encoder->connectors_active = true;
3894,8 → 3935,6
* consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
struct intel_encoder *encoder = intel_attached_encoder(connector);
 
/* All the simple cases only support two dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
3906,10 → 3945,8
connector->dpms = mode;
 
/* Only need to change hw state when actually enabled */
if (encoder->base.crtc)
intel_encoder_dpms(encoder, mode);
else
WARN_ON(encoder->connectors_active != false);
if (connector->encoder)
intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
 
intel_modeset_check_state(connector->dev);
}
3925,31 → 3962,139
return encoder->get_hw_state(encoder, &pipe);
}
 
static bool intel_crtc_compute_config(struct drm_crtc *crtc,
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
 
DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
 
if (IS_HASWELL(dev)) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
return false;
} else {
return true;
}
}
 
if (INTEL_INFO(dev)->num_pipes == 2)
return true;
 
/* Ivybridge 3 pipe is really complicated */
switch (pipe) {
case PIPE_A:
return true;
case PIPE_B:
if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
return true;
case PIPE_C:
if (!pipe_has_enabled_pch(pipe_B_crtc) ||
pipe_B_crtc->config.fdi_lanes <= 2) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
} else {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
}
return true;
default:
BUG();
}
}
 
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int lane, link_bw, fdi_dotclock;
bool setup_ok, needs_recompute = false;
 
retry:
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
* mode pixel clock is stored in units of 1KHz.
* Hence the bw of each lane in terms of the mode signal
* is:
*/
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 
fdi_dotclock = adjusted_mode->clock;
fdi_dotclock /= pipe_config->pixel_multiplier;
 
lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
pipe_config->pipe_bpp);
 
pipe_config->fdi_lanes = lane;
 
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n);
 
setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
intel_crtc->pipe, pipe_config);
if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
pipe_config->pipe_bpp);
needs_recompute = true;
pipe_config->bw_constrained = true;
 
goto retry;
}
 
if (needs_recompute)
return RETRY;
 
return setup_ok ? 0 : -EINVAL;
}
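 
/*
 * Editor's sketch (not part of the driver): a worked example of the FDI
 * bandwidth math used by ironlake_fdi_compute_config() above. The helper
 * name and the mode values are assumptions picked for illustration, and we
 * assume intel_fdi_link_freq() reports 27, i.e. 2.7 GHz in 100 MHz units.
 */
static inline int example_fdi_lanes_for_1080p(void)
{
	int link_bw = 27 * 10000;	/* 2.7 GHz / 10 bits per octet, in kHz: 270000 */
	int fdi_dotclock = 148500;	/* 1920x1080@60 pixel clock in kHz, pixel_multiplier == 1 */
	int pipe_bpp = 24;		/* 8 bpc */
	int bps = fdi_dotclock * pipe_bpp;
 
	/* Same formula as ironlake_get_lanes_required(): 3564000 / 2160000 + 1 = 2 lanes. */
	return bps / (link_bw * 8) + 1;
}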
 
static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
pipe_config->ips_enabled = i915_enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config->pipe_bpp <= 24;
}
 
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
if (pipe_config->requested_mode.clock * 3
> IRONLAKE_FDI_FREQ * 4)
return false;
return -EINVAL;
}
 
/* All interlaced capable intel hw wants timings in frames. Note though
* that intel_lvds_mode_fixup does some funny tricks with the crtc
* timings, so we need to be careful not to clobber these.*/
if (!pipe_config->timings_set)
drm_mode_set_crtcinfo(adjusted_mode, 0);
 
/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
* with a hsync front porch of 0.
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
adjusted_mode->hsync_start == adjusted_mode->hdisplay)
return false;
return -EINVAL;
 
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
3959,7 → 4104,18
pipe_config->pipe_bpp = 8*3;
}
 
return true;
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
 
/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
* clock survives for now. */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
pipe_config->shared_dpll = crtc->config.shared_dpll;
 
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
 
return 0;
}
 
static int valleyview_get_display_clock_speed(struct drm_device *dev)
3982,6 → 4138,30
return 200000;
}
 
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
 
pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
 
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_267_MHZ_PNV:
return 267000;
case GC_DISPLAY_CLOCK_333_MHZ_PNV:
return 333000;
case GC_DISPLAY_CLOCK_444_MHZ_PNV:
return 444000;
case GC_DISPLAY_CLOCK_200_MHZ_PNV:
return 200000;
default:
DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
return 133000;
case GC_DISPLAY_CLOCK_167_MHZ_PNV:
return 167000;
}
}
 
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
4068,7 → 4248,7
{
if (i915_panel_use_ssc >= 0)
return i915_panel_use_ssc != 0;
return dev_priv->lvds_use_ssc
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
 
4104,7 → 4284,7
refclk = vlv_get_refclk(crtc);
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
} else if (!IS_GEN2(dev)) {
4116,28 → 4296,14
return refclk;
}
 
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc)
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
unsigned dotclock = crtc->config.adjusted_mode.clock;
struct dpll *clock = &crtc->config.dpll;
 
/* SDVO TV has fixed PLL values that depend on its clock range;
this mirrors the VBIOS setting. */
if (dotclock >= 100000 && dotclock < 140500) {
clock->p1 = 2;
clock->p2 = 10;
clock->n = 3;
clock->m1 = 16;
clock->m2 = 8;
} else if (dotclock >= 140500 && dotclock <= 200000) {
clock->p1 = 1;
clock->p2 = 10;
clock->n = 6;
clock->m1 = 12;
clock->m2 = 8;
return (1 << dpll->n) << 16 | dpll->m2;
}
 
crtc->config.clock_set = true;
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
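 
/*
 * Editor's note (example divider values, not from the driver): both helpers
 * above pack the FP register from the same fields and differ only in how N
 * is encoded. With n = 3, m1 = 16, m2 = 8, i9xx_dpll_compute_fp() yields
 * 3 << 16 | 16 << 8 | 8 = 0x00031008, while pnv_dpll_compute_fp() stores N
 * as a one-hot value, (1 << 3) << 16 | 8 = 0x00080008 (the Pineview encoding
 * does not include M1).
 */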
 
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4147,32 → 4313,94
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
u32 fp, fp2 = 0;
struct dpll *clock = &crtc->config.dpll;
 
if (IS_PINEVIEW(dev)) {
fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
fp = pnv_dpll_compute_fp(&crtc->config.dpll);
if (reduced_clock)
fp2 = (1 << reduced_clock->n) << 16 |
reduced_clock->m1 << 8 | reduced_clock->m2;
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
if (reduced_clock)
fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
reduced_clock->m2;
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
 
I915_WRITE(FP0(pipe), fp);
crtc->config.dpll_hw_state.fp0 = fp;
 
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
I915_WRITE(FP1(pipe), fp);
crtc->config.dpll_hw_state.fp1 = fp;
}
}
 
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
{
u32 reg_val;
 
/*
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
 
reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
reg_val &= 0x8cffffff;
reg_val |= 0x8c000000;
vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
 
reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
reg_val &= 0xffffff00;
vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
 
reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
}
 
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
 
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config.cpu_transcoder;
 
if (INTEL_INFO(dev)->gen >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
} else {
I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
}
}
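 
/*
 * Editor's note (hedged, not from the driver source): the two helpers above
 * differ only in which register block they target. gmch_m/gmch_n carry the
 * data M/N ratio (roughly pixel clock * bpp versus link clock * lanes * 8)
 * with the transfer unit size OR'd in via TU_SIZE(), while link_m/link_n
 * carry the pixel-clock-to-link-clock ratio; both pairs are produced by
 * intel_link_compute_m_n() in the FDI/DP paths that call these setters.
 */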
 
static void intel_dp_set_m_n(struct intel_crtc *crtc)
{
if (crtc->config.has_pch_encoder)
4186,24 → 4414,12
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
u32 dpll, mdiv, pdiv;
u32 dpll, mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
bool is_sdvo;
u32 temp;
u32 coreclk, reg_val, dpll_md;
 
mutex_lock(&dev_priv->dpio_lock);
 
is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
 
dpll = DPLL_VGA_MODE_DIS;
dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
dpll |= DPLL_REFA_CLK_ENABLE_VLV;
dpll |= DPLL_INTEGRATED_CLOCK_VLV;
 
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
 
bestn = crtc->config.dpll.n;
bestm1 = crtc->config.dpll.m1;
bestm2 = crtc->config.dpll.m2;
4210,72 → 4426,94
bestp1 = crtc->config.dpll.p1;
bestp2 = crtc->config.dpll.p2;
 
/*
* In Valleyview, the PLL and lane counter programming registers are exposed
* through the DPIO interface
*/
/* See eDP HDMI DPIO driver vbios notes doc */
 
/* PLL B needs special handling */
if (pipe)
vlv_pllb_recal_opamp(dev_priv);
 
/* Set up Tx target for periodic Rcomp update */
vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
 
/* Disable target IRef on PLL */
reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
reg_val &= 0x00ffffff;
vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
 
/* Disable fast lock */
vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
 
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
mdiv |= ((bestn << DPIO_N_SHIFT));
mdiv |= (1 << DPIO_POST_DIV_SHIFT);
mdiv |= (1 << DPIO_K_SHIFT);
mdiv |= DPIO_ENABLE_CALIBRATION;
intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
 
intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
/*
* Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
* but we don't support that).
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
 
pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
(5 << DPIO_CLK_BIAS_CTL_SHIFT);
intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
 
intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
/* Set HBR and RBR LPF coefficients */
if (crtc->config.port_clock == 162000 ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
0x00d0000f);
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
if (!pipe)
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
0x0df40000);
else
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (!pipe)
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
0x0df70000);
else
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
0x0df40000);
}
 
intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
 
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
 
I915_WRITE(DPLL(pipe), dpll);
/* Enable DPIO clock input */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
if (pipe)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
dpll |= DPLL_VCO_ENABLE;
crtc->config.dpll_hw_state.dpll = dpll;
 
temp = 0;
if (is_sdvo) {
temp = 0;
if (crtc->config.pixel_multiplier > 1) {
temp = (crtc->config.pixel_multiplier - 1)
dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
}
I915_WRITE(DPLL_MD(pipe), temp);
POSTING_READ(DPLL_MD(pipe));
crtc->config.dpll_hw_state.dpll_md = dpll_md;
 
/* Now program lane control registers */
if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)
|| intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
temp = 0x1000C4;
if(pipe == 1)
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
}
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
 
if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) {
temp = 0x1000C4;
if(pipe == 1)
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
}
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
4285,8 → 4523,6
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
int pipe = crtc->pipe;
u32 dpll;
bool is_sdvo;
struct dpll *clock = &crtc->config.dpll;
4303,16 → 4539,16
else
dpll |= DPLLB_MODE_DAC_SERIAL;
 
if (is_sdvo) {
if ((crtc->config.pixel_multiplier > 1) &&
(IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
dpll |= (crtc->config.pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
 
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
dpll |= DPLL_DVO_HIGH_SPEED;
dpll |= DPLL_SDVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
if (IS_PINEVIEW(dev))
4339,12 → 4575,8
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
if (crtc->config.sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4352,52 → 4584,24
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
udelay(150);
crtc->config.dpll_hw_state.dpll = dpll;
 
for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
if (INTEL_INFO(dev)->gen >= 4) {
u32 dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc->config.dpll_hw_state.dpll_md = dpll_md;
}
 
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
 
I915_WRITE(DPLL(pipe), dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
 
if (INTEL_INFO(dev)->gen >= 4) {
u32 temp = 0;
if (is_sdvo) {
temp = 0;
if (crtc->config.pixel_multiplier > 1) {
temp = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(DPLL(pipe), dpll);
}
}
 
static void i8xx_update_pll(struct intel_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
int pipe = crtc->pipe;
u32 dpll;
struct dpll *clock = &crtc->config.dpll;
 
4416,6 → 4620,9
dpll |= PLL_P2_DIVIDE_BY_4;
}
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4423,42 → 4630,29
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
udelay(150);
 
for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
I915_WRITE(DPLL(pipe), dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
 
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(DPLL(pipe), dpll);
crtc->config.dpll_hw_state.dpll = dpll;
}
 
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
uint32_t vsyncshift;
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
 
/* We need to be careful not to change the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
crtc_vtotal = adjusted_mode->crtc_vtotal;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
 
if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
vsyncshift = adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal / 2;
} else {
4480,10 → 4674,10
 
I915_WRITE(VTOTAL(cpu_transcoder),
(adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
((crtc_vtotal - 1) << 16));
I915_WRITE(VBLANK(cpu_transcoder),
(adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
((crtc_vblank_end - 1) << 16));
I915_WRITE(VSYNC(cpu_transcoder),
(adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
4503,6 → 4697,66
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
}
 
static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
uint32_t tmp;
 
tmp = I915_READ(HTOTAL(cpu_transcoder));
pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HBLANK(cpu_transcoder));
pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HSYNC(cpu_transcoder));
pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
 
tmp = I915_READ(VTOTAL(cpu_transcoder));
pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VBLANK(cpu_transcoder));
pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VSYNC(cpu_transcoder));
pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
 
if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
pipe_config->adjusted_mode.crtc_vtotal += 1;
pipe_config->adjusted_mode.crtc_vblank_end += 1;
}
 
tmp = I915_READ(PIPESRC(crtc->pipe));
pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
}
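 
/*
 * Editor's note (example values, not from the driver): the timing registers
 * store "value - 1" in each 16-bit half. For a mode with hdisplay = 1920 and
 * htotal = 2200, intel_set_pipe_timings() writes HTOTAL as
 * (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f, and the readout above adds 1
 * back to each field to recover 1920/2200. The interlace halfline fixup is
 * symmetric too: -1 on crtc_vtotal/crtc_vblank_end when writing, +1 when
 * reading, so the decoded config matches the original adjusted mode.
 */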
 
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_crtc *crtc = &intel_crtc->base;
 
crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
 
crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
 
crtc->mode.flags = pipe_config->adjusted_mode.flags;
 
crtc->mode.clock = pipe_config->adjusted_mode.clock;
crtc->mode.flags |= pipe_config->adjusted_mode.flags;
}
 
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
4509,8 → 4763,12
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pipeconf;
 
pipeconf = I915_READ(PIPECONF(intel_crtc->pipe));
pipeconf = 0;
 
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
pipeconf |= PIPECONF_ENABLE;
 
if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
* core speed.
4521,26 → 4779,28
if (intel_crtc->config.requested_mode.clock >
dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
pipeconf |= PIPECONF_DOUBLE_WIDE;
else
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
 
/* default to 8bpc */
pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
if (intel_crtc->config.has_dp_encoder) {
if (intel_crtc->config.dither) {
pipeconf |= PIPECONF_6BPC |
PIPECONF_DITHER_EN |
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
}
 
if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base,
INTEL_OUTPUT_EDP)) {
if (intel_crtc->config.dither) {
pipeconf |= PIPECONF_6BPC |
PIPECONF_ENABLE |
I965_PIPECONF_ACTIVE;
switch (intel_crtc->config.pipe_bpp) {
case 18:
pipeconf |= PIPECONF_6BPC;
break;
case 24:
pipeconf |= PIPECONF_8BPC;
break;
case 30:
pipeconf |= PIPECONF_10BPC;
break;
default:
/* Case prevented by intel_choose_pipe_bpp_dither. */
BUG();
}
}
 
4550,11 → 4810,9
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
} else {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
 
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (!IS_GEN2(dev) &&
intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4561,12 → 4819,8
else
pipeconf |= PIPECONF_PROGRESSIVE;
 
if (IS_VALLEYVIEW(dev)) {
if (intel_crtc->config.limited_color_range)
if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
else
pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT;
}
 
I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(intel_crtc->pipe));
4579,8 → 4833,6
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
4587,8 → 4839,8
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dspcntr;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false;
bool ok, has_reduced_clock = false;
bool is_lvds = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
4598,15 → 4850,6
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
}
 
num_connectors++;
4620,16 → 4863,14
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
&clock);
if (!ok) {
ok = dev_priv->display.find_dpll(limit, crtc,
intel_crtc->config.port_clock,
refclk, NULL, &clock);
if (!ok && !intel_crtc->config.clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
* Ensure we match the reduced clock's P to the target clock.
4637,10 → 4878,10
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
*/
has_reduced_clock = limit->find_pll(limit, crtc,
has_reduced_clock =
dev_priv->display.find_dpll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&clock,
refclk, &clock,
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
4652,11 → 4893,8
intel_crtc->config.dpll.p2 = clock.p2;
}
 
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(intel_crtc);
 
if (IS_GEN2(dev))
i8xx_update_pll(intel_crtc, adjusted_mode,
i8xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
else if (IS_VALLEYVIEW(dev))
4676,11 → 4914,8
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
 
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
intel_set_pipe_timings(intel_crtc);
 
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
4691,10 → 4926,6
 
i9xx_set_pipeconf(intel_crtc);
 
intel_enable_pipe(dev_priv, pipe, false);
 
intel_wait_for_vblank(dev, pipe);
 
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
 
4705,6 → 4936,33
return ret;
}
 
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
tmp = I915_READ(PFIT_CONTROL);
if (!(tmp & PFIT_ENABLE))
return;
 
/* Check whether the pfit is attached to our pipe. */
if (INTEL_INFO(dev)->gen < 4) {
if (crtc->pipe != PIPE_B)
return;
} else {
if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
return;
}
 
pipe_config->gmch_pfit.control = tmp;
pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =
I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
 
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
4712,10 → 4970,45
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
return false;
 
intel_get_pipe_timings(crtc, pipe_config);
 
i9xx_get_pfit_config(crtc, pipe_config);
 
if (INTEL_INFO(dev)->gen >= 4) {
tmp = I915_READ(DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
>> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
} else {
/* Note that on i915G/GM the pixel multiplier is in the sdvo
* port and will be fixed up in the encoder->get_config
* function. */
pipe_config->pixel_multiplier = 1;
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev)) {
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
DPLL_PORTC_READY_MASK |
DPLL_PORTB_READY_MASK);
}
 
return true;
}
 
4727,7 → 5020,6
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_pch_edp = false;
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
4742,9 → 5034,7
break;
case INTEL_OUTPUT_EDP:
has_panel = true;
if (intel_encoder_is_pch_edp(&encoder->base))
has_pch_edp = true;
else
if (enc_to_dig_port(&encoder->base)->port == PORT_A)
has_cpu_edp = true;
break;
}
4751,7 → 5041,7
}
 
if (HAS_PCH_IBX(dev)) {
has_ck505 = dev_priv->display_clock_mode;
has_ck505 = dev_priv->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
has_ck505 = false;
4758,9 → 5048,8
can_ssc = true;
}
 
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
has_panel, has_lvds, has_pch_edp, has_cpu_edp,
has_ck505);
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
has_panel, has_lvds, has_ck505);
 
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
4872,45 → 5161,10
BUG_ON(val != final);
}
 
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
static void lpt_init_pch_refclk(struct drm_device *dev)
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
bool has_vga = false;
bool is_sdv = false;
u32 tmp;
uint32_t tmp;
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
break;
}
}
 
if (!has_vga)
return;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* XXX: Rip out SDV support once Haswell ships for real. */
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
is_sdv = true;
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
tmp |= SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
udelay(24);
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
if (!is_sdv) {
tmp = I915_READ(SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
4924,22 → 5178,20
I915_WRITE(SOUTH_CHICKEN2, tmp);
 
if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
100))
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
 
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
uint32_t tmp;
 
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
 
if (is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
tmp |= 0x7FFF;
intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
}
 
tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
4948,24 → 5200,6
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
 
if (is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
tmp |= (0x3F << 8);
intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
tmp |= (0x3F << 8);
intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
}
 
tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
4974,7 → 5208,6
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
 
if (!is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
4984,7 → 5217,6
tmp &= ~(7 << 13);
tmp |= (5 << 13);
intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
}
 
tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
5006,7 → 5238,6
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
 
if (!is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
tmp |= (1 << 27);
intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5026,14 → 5257,101
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
 
/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
tmp |= SBI_DBUFF0_ENABLE;
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
/* Implements 3 different sequences from BSpec chapter "Display iCLK
* Programming" based on the parameters passed:
* - Sequence to enable CLKOUT_DP
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
bool with_fdi)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg, tmp;
 
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
 
mutex_lock(&dev_priv->dpio_lock);
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
tmp |= SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
udelay(24);
 
if (with_spread) {
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
if (with_fdi) {
lpt_reset_fdi_mphy(dev_priv);
lpt_program_fdi_mphy(dev_priv);
}
}
 
reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg, tmp;
 
mutex_lock(&dev_priv->dpio_lock);
 
reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
if (!(tmp & SBI_SSCCTL_DISABLE)) {
if (!(tmp & SBI_SSCCTL_PATHALT)) {
tmp |= SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(32);
}
tmp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
}
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void lpt_init_pch_refclk(struct drm_device *dev)
{
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
bool has_vga = false;
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
break;
}
}
 
if (has_vga)
lpt_enable_clkout_dp(dev, true, true);
else
lpt_disable_clkout_dp(dev);
}
 
/*
* Initialize reference clocks when the driver loads
*/
5050,7 → 5368,6
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
struct intel_encoder *edp_encoder = NULL;
int num_connectors = 0;
bool is_lvds = false;
 
5059,9 → 5376,6
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_EDP:
edp_encoder = encoder;
break;
}
num_connectors++;
}
5068,16 → 5382,14
 
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
dev_priv->lvds_ssc_freq);
return dev_priv->lvds_ssc_freq * 1000;
dev_priv->vbt.lvds_ssc_freq);
return dev_priv->vbt.lvds_ssc_freq * 1000;
}
 
return 120000;
}
 
static void ironlake_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5084,9 → 5396,8
int pipe = intel_crtc->pipe;
uint32_t val;
 
val = I915_READ(PIPECONF(pipe));
val = 0;
 
val &= ~PIPECONF_BPC_MASK;
switch (intel_crtc->config.pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
5105,12 → 5416,10
BUG();
}
 
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
if (dither)
if (intel_crtc->config.dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
val &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
5117,8 → 5426,6
 
if (intel_crtc->config.limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
else
val &= ~PIPECONF_COLOR_RANGE_SELECT;
 
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
5188,9 → 5495,7
}
}
 
static void haswell_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5197,14 → 5502,12
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
uint32_t val;
 
val = I915_READ(PIPECONF(cpu_transcoder));
val = 0;
 
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
if (dither)
if (intel_crtc->config.dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
val &= ~PIPECONF_INTERLACE_MASK_HSW;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
5211,10 → 5514,12
 
I915_WRITE(PIPECONF(cpu_transcoder), val);
POSTING_READ(PIPECONF(cpu_transcoder));
 
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
}
 
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
bool *has_reduced_clock,
intel_clock_t *reduced_clock)
5224,7 → 5529,7
struct intel_encoder *intel_encoder;
int refclk;
const intel_limit_t *limit;
bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
bool ret, is_lvds = false;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
switch (intel_encoder->type) {
5231,15 → 5536,6
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
}
}
 
5251,8 → 5547,9
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
clock);
ret = dev_priv->display.find_dpll(limit, crtc,
to_intel_crtc(crtc)->config.port_clock,
refclk, NULL, clock);
if (!ret)
return false;
 
5263,16 → 5560,13
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
*/
*has_reduced_clock = limit->find_pll(limit, crtc,
*has_reduced_clock =
dev_priv->display.find_dpll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
clock,
refclk, clock,
reduced_clock);
}
 
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc));
 
return true;
}
 
5294,65 → 5588,25
POSTING_READ(SOUTH_CHICKEN1);
}
 
static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
 
DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
if (intel_crtc->fdi_lanes > 4) {
DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
/* Clamp lanes to avoid programming the hw with bogus values. */
intel_crtc->fdi_lanes = 4;
 
return false;
}
 
if (INTEL_INFO(dev)->num_pipes == 2)
return true;
 
switch (intel_crtc->pipe) {
case PIPE_A:
return true;
break;
case PIPE_B:
if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
intel_crtc->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
/* Clamp lanes to avoid programming the hw with bogus values. */
intel_crtc->fdi_lanes = 2;
 
return false;
}
 
if (intel_crtc->fdi_lanes > 2)
if (intel_crtc->config.fdi_lanes > 2)
WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
else
cpt_enable_fdi_bc_bifurcation(dev);
 
return true;
break;
case PIPE_C:
if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
if (intel_crtc->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
/* Clamp lanes to avoid programming the hw with bogus values. */
intel_crtc->fdi_lanes = 2;
 
return false;
}
} else {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
}
 
cpt_enable_fdi_bc_bifurcation(dev);
 
return true;
break;
default:
BUG();
}
5369,78 → 5623,13
return bps / (link_bw * 8) + 1;
}
 
void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n);
I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m);
I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n);
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
 
void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config.cpu_transcoder;
 
if (INTEL_INFO(dev)->gen >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
} else {
I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n);
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m);
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n);
}
}
 
static void ironlake_fdi_set_m_n(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct intel_link_m_n m_n = {0};
int target_clock, lane, link_bw;
 
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
* mode pixel clock is stored in units of 1KHz.
* Hence the bw of each lane in terms of the mode signal
* is:
*/
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 
if (intel_crtc->config.pixel_target_clock)
target_clock = intel_crtc->config.pixel_target_clock;
else
target_clock = adjusted_mode->clock;
 
lane = ironlake_get_lanes_required(target_clock, link_bw,
intel_crtc->config.pipe_bpp);
 
intel_crtc->fdi_lanes = lane;
 
if (intel_crtc->config.pixel_multiplier > 1)
link_bw *= intel_crtc->config.pixel_multiplier;
intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock,
link_bw, &m_n);
 
intel_cpu_transcoder_set_m_n(intel_crtc, &m_n);
}
 
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
intel_clock_t *clock, u32 *fp,
u32 *fp,
intel_clock_t *reduced_clock, u32 *fp2)
{
struct drm_crtc *crtc = &intel_crtc->base;
5449,7 → 5638,7
struct intel_encoder *intel_encoder;
uint32_t dpll;
int factor, num_connectors = 0;
bool is_lvds = false, is_sdvo = false, is_tv = false;
bool is_lvds = false, is_sdvo = false;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
switch (intel_encoder->type) {
5459,12 → 5648,7
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
}
 
num_connectors++;
5474,13 → 5658,13
factor = 21;
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->lvds_ssc_freq == 100) ||
dev_priv->vbt.lvds_ssc_freq == 100) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (is_sdvo && is_tv)
} else if (intel_crtc->config.sdvo_tv_clock)
factor = 20;
 
if (clock->m < factor * clock->n)
if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
*fp |= FP_CB_TUNE;
 
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
5492,23 → 5676,21
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
if (intel_crtc->config.pixel_multiplier > 1) {
 
dpll |= (intel_crtc->config.pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
if (intel_crtc->config.has_dp_encoder &&
intel_crtc->config.has_pch_encoder)
dpll |= DPLL_DVO_HIGH_SPEED;
 
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_crtc->config.has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
switch (clock->p2) {
switch (intel_crtc->config.dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
5523,18 → 5705,12
break;
}
 
if (is_sdvo && is_tv)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (is_tv)
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
 
return dpll;
return dpll | DPLL_VCO_ENABLE;
}
 
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5544,19 → 5720,16
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, fp = 0, fp2 = 0;
u32 dpll = 0, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
bool is_lvds = false;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
int ret;
bool dither, fdi_config_ok;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
5571,11 → 5744,9
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 
intel_crtc->config.cpu_transcoder = pipe;
 
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
ok = ironlake_compute_clocks(crtc, &clock,
&has_reduced_clock, &reduced_clock);
if (!ok) {
if (!ok && !intel_crtc->config.clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
5588,84 → 5759,57
intel_crtc->config.dpll.p2 = clock.p2;
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
/* determine panel color depth */
dither = intel_crtc->config.dither;
if (is_lvds && dev_priv->lvds_dither)
dither = true;
 
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (intel_crtc->config.has_pch_encoder) {
fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
 
dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock,
dpll = ironlake_compute_dpll(intel_crtc,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
 
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
intel_crtc->config.dpll_hw_state.dpll = dpll;
intel_crtc->config.dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
intel_crtc->config.dpll_hw_state.fp1 = fp2;
else
intel_crtc->config.dpll_hw_state.fp1 = fp;
 
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (intel_crtc->config.has_pch_encoder) {
struct intel_pch_pll *pll;
 
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
pipe);
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(pipe));
return -EINVAL;
}
} else
intel_put_pch_pll(intel_crtc);
intel_put_shared_dpll(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
if (is_lvds && has_reduced_clock && i915_powersave)
intel_crtc->lowfreq_avail = true;
else
intel_crtc->lowfreq_avail = false;
 
if (intel_crtc->pch_pll) {
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
if (intel_crtc->config.has_pch_encoder) {
pll = intel_crtc_to_shared_dpll(intel_crtc);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(intel_crtc->pch_pll->pll_reg);
udelay(150);
 
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
}
 
intel_crtc->lowfreq_avail = false;
if (intel_crtc->pch_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
} else {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
}
}
 
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
if (IS_IVYBRIDGE(dev))
ivybridge_update_fdi_bc_bifurcation(intel_crtc);
 
/* Note, this also computes intel_crtc->fdi_lanes which is used below in
* ironlake_check_fdi_lanes. */
intel_crtc->fdi_lanes = 0;
if (intel_crtc->config.has_pch_encoder)
ironlake_fdi_set_m_n(crtc);
ironlake_set_pipeconf(crtc);
 
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
 
ironlake_set_pipeconf(crtc, adjusted_mode, dither);
 
intel_wait_for_vblank(dev, pipe);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
5674,11 → 5818,49
 
intel_update_watermarks(dev);
 
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
return ret;
}
 
return fdi_config_ok ? ret : -EINVAL;
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder transcoder = pipe_config->cpu_transcoder;
 
pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
& ~TU_SIZE_MASK;
pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
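/* Note (illustrative): the data M register packs the TU size as (tu - 1)
* in its high bits, hence the "+ 1" when decoding below; a raw field value
* of 63, for example, corresponds to a transfer unit of 64.
*/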
pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
 
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
tmp = I915_READ(PF_CTL(crtc->pipe));
 
if (tmp & PF_ENABLE) {
pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
 
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now. */
if (IS_GEN7(dev)) {
WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
PF_PIPE_SEL_IVB(crtc->pipe));
}
}
}
 
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
5686,114 → 5868,399
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
return false;
 
if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE)
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
 
pipe_config->has_pch_encoder = true;
 
tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
ironlake_get_fdi_m_n_config(crtc, pipe_config);
 
if (HAS_PCH_IBX(dev_priv->dev)) {
pipe_config->shared_dpll =
(enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
else
pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
}
 
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
 
WARN_ON(!pll->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
 
tmp = pipe_config->dpll_hw_state.dpll;
pipe_config->pixel_multiplier =
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
 
intel_get_pipe_timings(crtc, pipe_config);
 
ironlake_get_pfit_config(crtc, pipe_config);
 
return true;
}
 
static void haswell_modeset_global_resources(struct drm_device *dev)
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool enable = false;
struct drm_device *dev = dev_priv->dev;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
unsigned long irqflags;
uint32_t val;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
if (crtc->pipe != PIPE_A && crtc->base.enabled)
enable = true;
/* XXX: Should check for the eDP transcoder here, but thanks to the init
* sequence that's not yet available. Just in case desktop eDP
* on PORT D is possible on Haswell, too. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
 
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
WARN(plls->spll_refcount, "SPLL enabled\n");
WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
val = I915_READ(DEIMR);
WARN((val & ~DE_PCH_EVENT_IVB) != val,
"Unexpected DEIMR bits enabled: 0x%x\n", val);
val = I915_READ(SDEIMR);
WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
"Unexpected SDEIMR bits enabled: 0x%x\n", val);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (encoder->type != INTEL_OUTPUT_EDP &&
encoder->connectors_active)
enable = true;
/*
* This function implements pieces of two sequences from BSpec:
* - Sequence for display software to disable LCPLL
* - Sequence for display software to allow package C8+
* The steps implemented here are just the steps that actually touch the LCPLL
* register. Callers should take care of disabling all the display engine
* functions, doing the mode unset, fixing interrupts, etc.
*/
void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down)
{
uint32_t val;
 
assert_can_disable_lcpll(dev_priv);
 
val = I915_READ(LCPLL_CTL);
 
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
 
if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
DRM_ERROR("Switching to FCLK failed\n");
 
val = I915_READ(LCPLL_CTL);
}
 
/* Even the eDP panel fitter is outside the always-on well. */
if (dev_priv->pch_pf_size)
enable = true;
val |= LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
 
intel_set_power_well(dev, enable);
if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
DRM_ERROR("LCPLL still locked\n");
 
val = I915_READ(D_COMP);
val |= D_COMP_COMP_DISABLE;
I915_WRITE(D_COMP, val);
POSTING_READ(D_COMP);
udelay(100);
 
if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
DRM_ERROR("D_COMP RCOMP still in progress\n");
 
if (allow_power_down) {
val = I915_READ(LCPLL_CTL);
val |= LCPLL_POWER_DOWN_ALLOW;
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
}
}
 
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
/*
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
* source.
*/
void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
bool is_cpu_edp = false;
struct intel_encoder *encoder;
int ret;
bool dither;
uint32_t val;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&encoder->base))
is_cpu_edp = true;
break;
val = I915_READ(LCPLL_CTL);
 
if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
return;
 
/* Make sure we're not in PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
dev_priv->uncore.funcs.force_wake_get(dev_priv);
 
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
}
 
num_connectors++;
val = I915_READ(D_COMP);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
I915_WRITE(D_COMP, val);
POSTING_READ(D_COMP);
 
val = I915_READ(LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
 
if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
DRM_ERROR("LCPLL not locked yet\n");
 
if (val & LCPLL_CD_SOURCE_FCLK) {
val = I915_READ(LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
 
if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
}
 
if (is_cpu_edp)
intel_crtc->config.cpu_transcoder = TRANSCODER_EDP;
dev_priv->uncore.funcs.force_wake_put(dev_priv);
}
 
void hsw_enable_pc8_work(struct work_struct *__work)
{
struct drm_i915_private *dev_priv =
container_of(to_delayed_work(__work), struct drm_i915_private,
pc8.enable_work);
struct drm_device *dev = dev_priv->dev;
uint32_t val;
 
if (dev_priv->pc8.enabled)
return;
 
DRM_DEBUG_KMS("Enabling package C8+\n");
 
dev_priv->pc8.enabled = true;
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
 
lpt_disable_clkout_dp(dev);
hsw_pc8_disable_interrupts(dev);
hsw_disable_lcpll(dev_priv, true, true);
}
 
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
WARN(dev_priv->pc8.disable_count < 1,
"pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
 
dev_priv->pc8.disable_count--;
if (dev_priv->pc8.disable_count != 0)
return;
 
schedule_delayed_work(&dev_priv->pc8.enable_work,
msecs_to_jiffies(i915_pc8_timeout));
}
 
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t val;
 
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
WARN(dev_priv->pc8.disable_count < 0,
"pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
 
dev_priv->pc8.disable_count++;
if (dev_priv->pc8.disable_count != 1)
return;
 
// cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
if (!dev_priv->pc8.enabled)
return;
 
DRM_DEBUG_KMS("Disabling package C8+\n");
 
hsw_restore_lcpll(dev_priv);
hsw_pc8_restore_interrupts(dev);
lpt_init_pch_refclk(dev);
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
 
intel_prepare_ddi(dev);
i915_gem_init_swizzling(dev);
mutex_lock(&dev_priv->rps.hw_lock);
gen6_update_ring_freq(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
dev_priv->pc8.enabled = false;
}
 
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->pc8.lock);
__hsw_enable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
}
 
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->pc8.lock);
__hsw_disable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
}
 
static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
uint32_t val;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
if (crtc->base.enabled)
return false;
 
/* This case is still possible since we have the i915.disable_power_well
* parameter, and also because the KVMr or something else might be requesting
* the power well. */
val = I915_READ(HSW_PWR_WELL_DRIVER);
if (val != 0) {
DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
return false;
}
 
return true;
}
 
/* Since we're called from modeset_global_resources there's no way to
* symmetrically increase and decrease the refcount, so we use
* dev_priv->pc8.requirements_met to track whether we already have the refcount
* or not.
*/
static void hsw_update_package_c8(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool allow;
 
if (!i915_enable_pc8)
return;
 
mutex_lock(&dev_priv->pc8.lock);
 
allow = hsw_can_enable_package_c8(dev_priv);
 
if (allow == dev_priv->pc8.requirements_met)
goto done;
 
dev_priv->pc8.requirements_met = allow;
 
if (allow)
__hsw_enable_package_c8(dev_priv);
else
intel_crtc->config.cpu_transcoder = pipe;
__hsw_disable_package_c8(dev_priv);
 
/* We are not sure yet this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
INTEL_PCH_TYPE(dev));
done:
mutex_unlock(&dev_priv->pc8.lock);
}
 
WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
num_connectors, pipe_name(pipe));
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
hsw_enable_package_c8(dev_priv);
}
}
 
WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) &
(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
hsw_disable_package_c8(dev_priv);
}
}
 
WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
static void haswell_modeset_global_resources(struct drm_device *dev)
{
bool enable = false;
struct intel_crtc *crtc;
 
if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
return -EINVAL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
if (!crtc->base.enabled)
continue;
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
crtc->config.cpu_transcoder != TRANSCODER_EDP)
enable = true;
}
 
/* determine panel color depth */
dither = intel_crtc->config.dither;
intel_set_power_well(dev, enable);
 
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
hsw_update_package_c8(dev);
}
 
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane;
int ret;
 
if (!intel_ddi_pll_mode_set(crtc))
return -EINVAL;
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_crtc->lowfreq_avail = false;
 
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder)
ironlake_fdi_set_m_n(crtc);
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
}
 
haswell_set_pipeconf(crtc, adjusted_mode, dither);
haswell_set_pipeconf(crtc);
 
intel_set_pipe_csc(crtc);
 
5805,8 → 6272,6
 
intel_update_watermarks(dev);
 
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
 
return ret;
}
 
5815,23 → 6280,70
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
 
tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder));
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
if (tmp & TRANS_DDI_FUNC_ENABLE) {
enum pipe trans_edp_pipe;
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
WARN(1, "unknown pipe linked to edp transcoder\n");
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
trans_edp_pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
trans_edp_pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
trans_edp_pipe = PIPE_C;
break;
}
 
if (trans_edp_pipe == crtc->pipe)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
 
if (!intel_display_power_enabled(dev,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
 
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
if (!(tmp & PIPECONF_ENABLE))
return false;
 
/*
* aswell has only FDI/PCH transcoder A. It is which is connected to
* Haswell has only FDI/PCH transcoder A, which is the one connected to
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe));
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE)
I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
 
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
ironlake_get_fdi_m_n_config(crtc, pipe_config);
}
 
intel_get_pipe_timings(crtc, pipe_config);
 
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_enabled(dev, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
 
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
 
pipe_config->pixel_multiplier = 1;
 
return true;
}
 
5841,11 → 6353,8
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int ret;
5864,13 → 6373,8
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
mode->base.id, mode->name);
if (encoder->mode_set) {
encoder->mode_set(encoder);
} else {
encoder_funcs = encoder->base.helper_private;
encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
}
}
 
return 0;
}
5976,15 → 6480,15
 
/* Set ELD valid state */
tmp = I915_READ(aud_cntrl_st2);
DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
tmp = I915_READ(aud_cntrl_st2);
DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
 
/* Enable HDMI mode */
tmp = I915_READ(aud_config);
DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
/* clear N_programing_enable and N_value_index */
tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
I915_WRITE(aud_config, tmp);
6068,7 → 6572,7
eldv |= IBX_ELD_VALIDB << 4;
eldv |= IBX_ELD_VALIDB << 8;
} else {
DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
}
 
6136,17 → 6640,32
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int palreg = PALETTE(intel_crtc->pipe);
enum pipe pipe = intel_crtc->pipe;
int palreg = PALETTE(pipe);
int i;
bool reenable_ips = false;
 
/* The clocks have to be on to load the palette. */
if (!crtc->enabled || !intel_crtc->active)
return;
 
if (!HAS_PCH_SPLIT(dev_priv->dev))
assert_pll_enabled(dev_priv, pipe);
 
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
palreg = LGC_PALETTE(intel_crtc->pipe);
palreg = LGC_PALETTE(pipe);
 
/* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
if (intel_crtc->config.ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
}
 
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
(intel_crtc->lut_r[i] << 16) |
6153,6 → 6672,9
(intel_crtc->lut_g[i] << 8) |
intel_crtc->lut_b[i]);
}
 
if (reenable_ips)
hsw_enable_ips(intel_crtc);
}
 
#if 0
6229,8 → 6751,10
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
if (IS_HASWELL(dev))
if (IS_HASWELL(dev)) {
cntl |= CURSOR_PIPE_CSC_ENABLE;
cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
}
I915_WRITE(CURCNTR_IVB(pipe), cntl);
 
intel_crtc->cursor_visible = visible;
6367,7 → 6891,7
goto fail_unpin;
}
 
addr = obj->gtt_offset;
addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
6389,7 → 6913,7
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
i915_gem_object_unpin(intel_crtc->cursor_bo);
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
 
6400,11 → 6924,12
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
 
// intel_crtc_update_cursor(crtc, true);
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
return 0;
fail_unpin:
i915_gem_object_unpin(obj);
i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
6419,7 → 6944,8
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
 
// intel_crtc_update_cursor(crtc, true);
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
return 0;
}
6513,18 → 7039,7
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
// obj = i915_gem_alloc_object(dev,
// intel_framebuffer_size_for_mode(mode, bpp));
// if (obj == NULL)
return ERR_PTR(-ENOMEM);
 
// mode_cmd.width = mode->hdisplay;
// mode_cmd.height = mode->vdisplay;
// mode_cmd.depth = depth;
// mode_cmd.bpp = bpp;
// mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
 
// return intel_framebuffer_create(dev, &mode_cmd, obj);
return NULL;
}
 
static struct drm_framebuffer *
6695,11 → 7210,12
}
 
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int pipe = pipe_config->cpu_transcoder;
u32 dpll = I915_READ(DPLL(pipe));
u32 fp;
intel_clock_t clock;
6738,11 → 7254,14
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return 0;
pipe_config->adjusted_mode.clock = 0;
return;
}
 
/* XXX: Handle the 100Mhz refclk */
intel_clock(dev, 96000, &clock);
if (IS_PINEVIEW(dev))
pineview_clock(96000, &clock);
else
i9xx_clock(96000, &clock);
} else {
bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
 
6754,9 → 7273,9
if ((dpll & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
/* XXX: might not be 66MHz */
intel_clock(dev, 66000, &clock);
i9xx_clock(66000, &clock);
} else
intel_clock(dev, 48000, &clock);
i9xx_clock(48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
6769,16 → 7288,58
else
clock.p2 = 2;
 
intel_clock(dev, 48000, &clock);
i9xx_clock(48000, &clock);
}
}
 
/* XXX: It would be nice to validate the clocks, but we can't reuse
* i830PllIsValid() because it relies on the xf86_config connector
* configuration being accurate, which it isn't necessarily.
pipe_config->adjusted_mode.clock = clock.dot;
}
 
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
int link_freq, repeat;
u64 clock;
u32 link_m, link_n;
 
repeat = pipe_config->pixel_multiplier;
 
/*
* The calculation for the data clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
* But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
*
* and the link clock is simpler:
* link_clock = (m * link_clock * repeat) / n
*/
 
return clock.dot;
/*
* We need to get the FDI or DP link clock here to derive
* the M/N dividers.
*
* For FDI, we read it from the BIOS or use a fixed 2.7GHz.
* For DP, it's either 1.62GHz or 2.7GHz.
* We do our calculations in 10*MHz since we don't need much precision.
*/
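/* Illustrative example (register values assumed, not read from hardware):
* with link_m/link_n programmed as 1/2, a 270,000 kHz FDI link clock and
* pixel_multiplier == 1, the computation below gives
*   clock = (1 * 270,000 * 1) / 2 = 135,000 kHz, i.e. a 135 MHz pixel clock.
*/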
if (pipe_config->has_pch_encoder)
link_freq = intel_fdi_link_freq(dev) * 10000;
else
link_freq = pipe_config->port_clock;
 
link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
 
if (!link_m || !link_n)
return;
 
clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
do_div(clock, link_n);
 
pipe_config->adjusted_mode.clock = clock;
}
 
/** Returns the currently programmed mode of the given pipe. */
6789,6 → 7350,7
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *mode;
struct intel_crtc_config pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
6798,7 → 7360,18
if (!mode)
return NULL;
 
mode->clock = intel_crtc_clock_get(dev, crtc);
/*
* Construct a pipe_config sufficient for getting the clock info
* back out of crtc_clock_get.
*
* Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
* to use a real value here instead.
*/
pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
pipe_config.pixel_multiplier = 1;
i9xx_crtc_clock_get(intel_crtc, &pipe_config);
 
mode->clock = pipe_config.adjusted_mode.clock;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
6882,13 → 7455,19
 
void intel_mark_busy(struct drm_device *dev)
{
i915_update_gfx_val(dev->dev_private);
struct drm_i915_private *dev_priv = dev->dev_private;
 
hsw_package_c8_gpu_busy(dev_priv);
i915_update_gfx_val(dev_priv);
}
 
void intel_mark_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
hsw_package_c8_gpu_idle(dev_priv);
 
if (!i915_powersave)
return;
 
6900,7 → 7479,8
}
}
 
void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
6912,8 → 7492,12
if (!crtc->fb)
continue;
 
if (to_intel_framebuffer(crtc->fb)->obj == obj)
if (to_intel_framebuffer(crtc->fb)->obj != obj)
continue;
 
intel_increase_pllclock(crtc);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
 
7047,7 → 7631,8
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7075,7 → 7660,7
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
 
intel_mark_page_flip_active(intel_crtc);
7091,7 → 7676,8
static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7116,7 → 7702,7
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
 
intel_mark_page_flip_active(intel_crtc);
7132,7 → 7718,8
static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7156,7 → 7743,7
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
(obj->gtt_offset + intel_crtc->dspaddr_offset) |
(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
obj->tiling_mode);
 
/* XXX Enabling the panel-fitter across page-flip is so far
7180,7 → 7767,8
static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7199,7 → 7787,7
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
7221,23 → 7809,22
return ret;
}
 
/*
* On gen7 we currently use the blit ring because (in early silicon at least)
* the render ring doesn't give us interrpts for page flip completion, which
* means clients will hang after the first flip is queued. Fortunately the
* blit ring generates interrupts properly, so use it instead.
*/
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
struct intel_ring_buffer *ring;
uint32_t plane_bit = 0;
int ret;
int len, ret;
 
ring = obj->ring;
if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
7258,13 → 7845,37
goto err_unpin;
}
 
ret = intel_ring_begin(ring, 4);
len = 4;
if (ring->id == RCS)
len += 6;
 
ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
 
/* Unmask the flip-done completion message. Note that the bspec says that
* we should do this for both the BCS and RCS, and that we must not unmask
* more than one flip event at any time (or ensure that one flip message
* can be sent by waiting for flip-done prior to queueing new flips).
* Experimentation says that BCS works despite DERRMR masking all
* flip-done completion events and that unmasking all planes at once
* for the RCS also doesn't appear to drop events. Setting the DERRMR
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
if (ring->id == RCS) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
}
 
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
 
intel_mark_page_flip_active(intel_crtc);
7280,7 → 7891,8
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
return -ENODEV;
}
7287,7 → 7899,8
 
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
7357,12 → 7970,12
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
if (ret)
goto cleanup_pending;
 
intel_disable_fbc(dev);
intel_mark_fb_busy(obj);
intel_mark_fb_busy(obj, NULL);
mutex_unlock(&dev->struct_mutex);
 
trace_i915_flip_request(intel_crtc->plane, obj);
7387,7 → 8000,6
 
return ret;
}
 
#endif
 
static struct drm_crtc_helper_funcs intel_helper_funcs = {
7395,28 → 8007,6
.load_lut = intel_crtc_load_lut,
};
 
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
{
struct intel_encoder *other_encoder;
struct drm_crtc *crtc = &encoder->new_crtc->base;
 
if (WARN_ON(!crtc))
return false;
 
list_for_each_entry(other_encoder,
&crtc->dev->mode_config.encoder_list,
base.head) {
 
if (&other_encoder->new_crtc->base != crtc ||
encoder == other_encoder)
continue;
else
return true;
}
 
return false;
}
 
static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
struct drm_crtc *crtc)
{
7484,13 → 8074,39
}
}
 
static void
connected_sink_compute_bpp(struct intel_connector * connector,
struct intel_crtc_config *pipe_config)
{
int bpp = pipe_config->pipe_bpp;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
connector->base.base.id,
drm_get_connector_name(&connector->base));
 
/* Don't use an invalid EDID bpc value */
if (connector->base.display_info.bpc &&
connector->base.display_info.bpc * 3 < bpp) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
bpp, connector->base.display_info.bpc*3);
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
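/* For instance (numbers assumed for illustration): a panel whose EDID
* reports 6 bpc clamps a 24 bpp (8 bpc) pipe down to 6 * 3 = 18 bpp here,
* while the check below caps panels reporting no bpc at all at 24 bpp.
*/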
 
/* Clamp bpp to 8 bpc (24 bpp) on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0 && bpp > 24) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
bpp);
pipe_config->pipe_bpp = 24;
}
}
 
static int
pipe_config_set_bpp(struct drm_crtc *crtc,
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct drm_framebuffer *fb,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_device *dev = crtc->base.dev;
struct intel_connector *connector;
int bpp;
 
switch (fb->pixel_format) {
7533,22 → 8149,67
 
/* Clamp display bpp to EDID value */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
if (connector->encoder && connector->encoder->crtc != crtc)
base.head) {
if (!connector->new_encoder ||
connector->new_encoder->new_crtc != crtc)
continue;
 
/* Don't use an invalid EDID bpc value */
if (connector->display_info.bpc &&
connector->display_info.bpc * 3 < bpp) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
bpp, connector->display_info.bpc*3);
pipe_config->pipe_bpp = connector->display_info.bpc*3;
connected_sink_compute_bpp(connector, pipe_config);
}
}
 
return bpp;
}
 
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
const char *context)
{
DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
context, pipe_name(crtc->pipe));
 
DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
pipe_config->pipe_bpp, pipe_config->dither);
DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
pipe_config->has_pch_encoder,
pipe_config->fdi_lanes,
pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
pipe_config->gmch_pfit.lvds_border_bits);
DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
pipe_config->pch_pfit.pos,
pipe_config->pch_pfit.size,
pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
}
 
static bool check_encoder_cloning(struct drm_crtc *crtc)
{
int num_encoders = 0;
bool uncloneable_encoders = false;
struct intel_encoder *encoder;
 
list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
base.head) {
if (&encoder->new_crtc->base != crtc)
continue;
 
num_encoders++;
if (!encoder->cloneable)
uncloneable_encoders = true;
}
 
return !(num_encoders > 1 && uncloneable_encoders);
}
 
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
7555,11 → 8216,16
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc_config *pipe_config;
int plane_bpp;
int plane_bpp, ret = -EINVAL;
bool retry = true;
 
if (!check_encoder_cloning(crtc)) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
return ERR_PTR(-EINVAL);
}
 
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
if (!pipe_config)
return ERR_PTR(-ENOMEM);
7566,11 → 8232,40
 
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config);
/*
* Sanitize sync polarity flags based on requested ones. If neither
* positive nor negative polarity is requested, treat this as meaning
* negative polarity.
*/
if (!(pipe_config->adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
 
if (!(pipe_config->adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
 
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
* source plane bpp so that dithering can be selected on mismatches
* after encoders and crtc also have had their say. */
plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
fb, pipe_config);
if (plane_bpp < 0)
goto fail;
 
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
pipe_config->port_clock = 0;
pipe_config->pixel_multiplier = 1;
 
/* Fill in default crtc timings, allow encoders to overwrite them. */
drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
 
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
7581,30 → 8276,34
if (&encoder->new_crtc->base != crtc)
continue;
 
if (encoder->compute_config) {
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
}
 
continue;
}
 
encoder_funcs = encoder->base.helper_private;
if (!(encoder_funcs->mode_fixup(&encoder->base,
&pipe_config->requested_mode,
&pipe_config->adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
pipe_config->port_clock = pipe_config->adjusted_mode.clock;
 
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
if (ret < 0) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto fail;
}
}
 
if (!(intel_crtc_compute_config(crtc, pipe_config))) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
if (ret == RETRY) {
if (WARN(!retry, "loop in pipe configuration computation\n")) {
ret = -EINVAL;
goto fail;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
retry = false;
goto encoder_retry;
}
 
pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
7612,7 → 8311,7
return pipe_config;
fail:
kfree(pipe_config);
return ERR_PTR(-EINVAL);
return ERR_PTR(ret);
}
 
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
7708,6 → 8407,9
*/
*modeset_pipes &= 1 << intel_crtc->pipe;
*prepare_pipes &= 1 << intel_crtc->pipe;
 
DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
*modeset_pipes, *prepare_pipes, *disable_pipes);
}
 
static bool intel_crtc_in_use(struct drm_crtc *crtc)
7770,35 → 8472,152
 
}
 
static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
struct intel_crtc_config *new)
{
int clock1, clock2, diff;
 
clock1 = cur->adjusted_mode.clock;
clock2 = new->adjusted_mode.clock;
 
if (clock1 == clock2)
return true;
 
if (!clock1 || !clock2)
return false;
 
diff = abs(clock1 - clock2);
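/* The check below treats the clocks as matching when their difference
* stays under 5% of their sum. Illustrative numbers: comparing 100,000 kHz
* with 104,000 kHz gives ((4,000 + 100,000 + 104,000) * 100) / 204,000 = 101,
* which is < 105 and therefore accepted.
*/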
 
if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
return true;
 
return false;
}
 
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
base.head) \
if (mask & (1 <<(intel_crtc)->pipe)) \
if (mask & (1 <<(intel_crtc)->pipe))
 
static bool
intel_pipe_config_compare(struct intel_crtc_config *current_config,
intel_pipe_config_compare(struct drm_device *dev,
struct intel_crtc_config *current_config,
struct intel_crtc_config *pipe_config)
{
if (current_config->has_pch_encoder != pipe_config->has_pch_encoder) {
DRM_ERROR("mismatch in has_pch_encoder "
"(expected %i, found %i)\n",
current_config->has_pch_encoder,
pipe_config->has_pch_encoder);
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
DRM_ERROR("mismatch in " #name " " \
"(expected 0x%08x, found 0x%08x)\n", \
current_config->name, \
pipe_config->name); \
return false; \
}
 
#define PIPE_CONF_CHECK_I(name) \
if (current_config->name != pipe_config->name) { \
DRM_ERROR("mismatch in " #name " " \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
return false; \
}
 
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
DRM_ERROR("mismatch in " #name "(" #mask ") " \
"(expected %i, found %i)\n", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
return false; \
}
 
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
 
PIPE_CONF_CHECK_I(cpu_transcoder);
 
PIPE_CONF_CHECK_I(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
PIPE_CONF_CHECK_I(fdi_m_n.link_m);
PIPE_CONF_CHECK_I(fdi_m_n.link_n);
PIPE_CONF_CHECK_I(fdi_m_n.tu);
 
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
 
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
 
PIPE_CONF_CHECK_I(pixel_multiplier);
 
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
 
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_PHSYNC);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_NHSYNC);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_PVSYNC);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_NVSYNC);
}
 
PIPE_CONF_CHECK_I(requested_mode.hdisplay);
PIPE_CONF_CHECK_I(requested_mode.vdisplay);
 
PIPE_CONF_CHECK_I(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
if (INTEL_INFO(dev)->gen < 4)
PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_I(pch_pfit.pos);
PIPE_CONF_CHECK_I(pch_pfit.size);
}
 
PIPE_CONF_CHECK_I(ips_enabled);
 
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_QUIRK
 
if (!IS_HASWELL(dev)) {
if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
current_config->adjusted_mode.clock,
pipe_config->adjusted_mode.clock);
return false;
}
}
 
return true;
}
 
void
intel_modeset_check_state(struct drm_device *dev)
static void
check_connector_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct intel_crtc_config pipe_config;
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
7809,7 → 8628,14
WARN(&connector->new_encoder->base != connector->base.encoder,
"connector's staged encoder doesn't match current encoder\n");
}
}
 
static void
check_encoder_state(struct drm_device *dev)
{
struct intel_encoder *encoder;
struct intel_connector *connector;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
bool enabled = false;
7860,12 → 8686,23
tracked_pipe, pipe);
 
}
}
 
static void
check_crtc_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_crtc_config pipe_config;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
bool enabled = false;
bool active = false;
 
memset(&pipe_config, 0, sizeof(pipe_config));
 
DRM_DEBUG_KMS("[CRTC:%d]\n",
crtc->base.base.id);
 
7880,6 → 8717,7
if (encoder->connectors_active)
active = true;
}
 
WARN(active != crtc->active,
"crtc's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, crtc->active);
7887,7 → 8725,6
"crtc's computed enabled state doesn't match tracked enabled state "
"(expected %i, found %i)\n", enabled, crtc->base.enabled);
 
memset(&pipe_config, 0, sizeof(pipe_config));
active = dev_priv->display.get_pipe_config(crtc,
&pipe_config);
 
7895,16 → 8732,93
if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
active = crtc->active;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
if (encoder->get_config &&
encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
 
if (dev_priv->display.get_clock)
dev_priv->display.get_clock(crtc, &pipe_config);
 
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
 
WARN(active &&
!intel_pipe_config_compare(&crtc->config, &pipe_config),
"pipe state doesn't match!\n");
if (active &&
!intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(crtc, &pipe_config,
"[hw state]");
intel_dump_pipe_config(crtc, &crtc->config,
"[sw state]");
}
}
}
 
static void
check_shared_dpll_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_dpll_hw_state dpll_hw_state;
int i;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
int enabled_crtcs = 0, active_crtcs = 0;
bool active;
 
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
 
DRM_DEBUG_KMS("%s\n", pll->name);
 
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
 
WARN(pll->active > pll->refcount,
"more active pll users than references: %i vs %i\n",
pll->active, pll->refcount);
WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
WARN(pll->on && !pll->active,
"pll in on but not on in use in sw tracking\n");
WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
enabled_crtcs++;
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
active_crtcs++;
}
WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
WARN(pll->refcount != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
pll->refcount, enabled_crtcs);
 
WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
}
 
void
intel_modeset_check_state(struct drm_device *dev)
{
check_connector_state(dev);
check_encoder_state(dev);
check_crtc_state(dev);
check_shared_dpll_state(dev);
}
 
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
7941,11 → 8855,10
 
goto out;
}
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
"[modeset]");
}
 
DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
modeset_pipes, prepare_pipes, disable_pipes);
 
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
 
7958,12 → 8871,10
* to set it here already despite that we pass it down the callchain.
*/
if (modeset_pipes) {
enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder;
crtc->mode = *mode;
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
to_intel_crtc(crtc)->config = *pipe_config;
to_intel_crtc(crtc)->config.cpu_transcoder = tmp;
}
 
/* Only after disabling all output pipelines that will be changed can we
8011,7 → 8922,7
return ret;
}
 
int intel_set_mode(struct drm_crtc *crtc,
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
8099,15 → 9010,20
}
 
static bool
is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
int num_connectors)
is_crtc_connector_off(struct drm_mode_set *set)
{
int i;
 
for (i = 0; i < num_connectors; i++)
if (connectors[i].encoder &&
connectors[i].encoder->crtc == crtc &&
connectors[i].dpms != DRM_MODE_DPMS_ON)
if (set->num_connectors == 0)
return false;
 
if (WARN_ON(set->connectors == NULL))
return false;
 
for (i = 0; i < set->num_connectors; i++)
if (set->connectors[i]->encoder &&
set->connectors[i]->encoder->crtc == set->crtc &&
set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
return true;
 
return false;
8120,15 → 9036,21
 
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->connectors != NULL &&
is_crtc_connector_off(set->crtc, *set->connectors,
set->num_connectors)) {
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
 
if (intel_crtc->active && i915_fastboot) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
DRM_DEBUG_KMS("inactive crtc, full mode set\n");
config->mode_changed = true;
}
} else if (set->fb == NULL) {
config->mode_changed = true;
} else if (set->fb->pixel_format !=
8148,6 → 9070,9
drm_mode_debug_printmodeline(set->mode);
config->mode_changed = true;
}
 
DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
set->crtc->base.id, config->mode_changed, config->fb_changed);
}
 
static int
8158,7 → 9083,7
struct drm_crtc *new_crtc;
struct intel_connector *connector;
struct intel_encoder *encoder;
int count, ro;
int ro;
 
/* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
8165,7 → 9090,6
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
 
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
/* Otherwise traverse passed in connector list and get encoders
8199,7 → 9123,6
/* connector->new_encoder is now updated for all connectors. */
 
/* Update crtc of enabled connectors. */
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (!connector->new_encoder)
8302,12 → 9225,6
goto fail;
 
if (config->mode_changed) {
if (set->mode) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
}
 
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
} else if (config->fb_changed) {
8318,7 → 9235,7
}
 
if (ret) {
DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
fail:
intel_set_config_restore_state(dev, config);
8350,23 → 9267,103
intel_ddi_pll_init(dev);
}
 
static void intel_pch_pll_init(struct drm_device *dev)
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
uint32_t val;
 
if (dev_priv->num_pch_pll == 0) {
DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
return;
val = I915_READ(PCH_DPLL(pll->id));
hw_state->dpll = val;
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
 
return val & DPLL_VCO_ENABLE;
}
 
for (i = 0; i < dev_priv->num_pch_pll; i++) {
dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
}
 
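/* Enable the PCH DPLL from the cached hw_state; the PCH reference clock
* must already be running. */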
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
 
I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pll->id));
udelay(150);
 
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
 
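/* Disable the PCH DPLL; every PCH transcoder still using it must have
* been disabled beforehand. */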
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
 
/* Make sure no transcoder is still depending on us. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
if (intel_crtc_to_shared_dpll(crtc) == pll)
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
 
I915_WRITE(PCH_DPLL(pll->id), 0);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
 
static char *ibx_pch_dpll_names[] = {
"PCH DPLL A",
"PCH DPLL B",
};
 
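/* IBX/CPT carry two fixed PCH DPLLs; register both with the shared DPLL
* framework. */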
static void ibx_pch_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
dev_priv->num_shared_dpll = 2;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
dev_priv->shared_dplls[i].get_hw_state =
ibx_pch_dpll_get_hw_state;
}
}
 
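/* Platform dispatch for shared DPLL setup; platforms without PCH PLLs
* simply register none. */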
static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ibx_pch_dpll_init(dev);
else
dev_priv->num_shared_dpll = 0;
 
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
DRM_DEBUG_KMS("%i shared PLLs initialized\n",
dev_priv->num_shared_dpll);
}
 
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
8389,7 → 9386,6
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
intel_crtc->config.cpu_transcoder = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
8472,13 → 9468,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
bool has_lvds;
 
has_lvds = intel_lvds_init(dev);
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
}
intel_lvds_init(dev);
 
if (!IS_ULT(dev))
intel_crt_init(dev);
8531,8 → 9522,13
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
PORT_C);
}
 
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
8551,11 → 9547,9
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
 
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_B\n");
if (!found && SUPPORTS_INTEGRATED_DP(dev))
intel_dp_init(dev, DP_B, PORT_B);
}
}
 
/* Before G4X, SDVOC doesn't have its own detect register */
 
8570,17 → 9564,13
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
if (SUPPORTS_INTEGRATED_DP(dev))
intel_dp_init(dev, DP_C, PORT_C);
}
}
 
if (SUPPORTS_INTEGRATED_DP(dev) &&
(I915_READ(DP_D) & DP_DETECTED)) {
DRM_DEBUG_KMS("probing DP_D\n");
(I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D, PORT_D);
}
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
 
8610,6 → 9600,7
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int pitch_limit;
int ret;
 
if (obj->tiling_mode == I915_TILING_Y) {
8623,10 → 9614,26
return -EINVAL;
}
 
/* FIXME <= Gen4 stride limits are a bit unclear */
if (mode_cmd->pitches[0] > 32768) {
DRM_DEBUG("pitch (%d) must be less than 32768\n",
mode_cmd->pitches[0]);
if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
pitch_limit = 32*1024;
} else if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode)
pitch_limit = 16*1024;
else
pitch_limit = 32*1024;
} else if (INTEL_INFO(dev)->gen >= 3) {
if (obj->tiling_mode)
pitch_limit = 8*1024;
else
pitch_limit = 16*1024;
} else
/* XXX DSPC is limited to 4k tiled */
pitch_limit = 8*1024;
 
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG("%s pitch (%d) must be less than %d\n",
obj->tiling_mode ? "tiled" : "linear",
mode_cmd->pitches[0], pitch_limit);
return -EINVAL;
}
 
8647,7 → 9654,8
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
if (INTEL_INFO(dev)->gen > 3) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
8658,7 → 9666,8
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
8667,12 → 9676,14
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
if (INTEL_INFO(dev)->gen < 5) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
default:
DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
 
8703,6 → 9714,15
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
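/* Pick the PLL divider search routine for this platform. */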
if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
dev_priv->display.find_dpll = g4x_find_best_dpll;
else if (IS_VALLEYVIEW(dev))
dev_priv->display.find_dpll = vlv_find_best_dpll;
else if (IS_PINEVIEW(dev))
dev_priv->display.find_dpll = pnv_find_best_dpll;
else
dev_priv->display.find_dpll = i9xx_find_best_dpll;
 
if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
8712,13 → 9732,23
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_clock = ironlake_crtc_clock_get;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
8736,9 → 9766,12
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
else if (IS_I945GM(dev) || IS_845G(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_PINEVIEW(dev))
dev_priv->display.get_display_clock_speed =
pnv_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
8817,6 → 9850,17
DRM_INFO("applying inverted panel brightness quirk\n");
}
 
/*
* Some machines (Dell XPS13) suffer broken backlight controls if
* BLM_PCH_PWM_ENABLE is set.
*/
static void quirk_no_pch_pwm_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
}
 
struct intel_quirk {
int device;
int subsystem_vendor;
8886,6 → 9930,11
 
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
 
/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pch_pwm_enable },
/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
{ 0x0166, 0x1028, 0x058b, quirk_no_pch_pwm_enable },
};
 
static void intel_init_quirks(struct drm_device *dev)
8980,18 → 10029,18
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
for_each_pipe(i) {
intel_crtc_init(dev, i);
for (j = 0; j < dev_priv->num_plane; j++) {
ret = intel_plane_init(dev, i, j);
if (ret)
DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n",
i, j, ret);
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
pipe_name(i), sprite_name(i, j), ret);
}
}
 
intel_cpu_pll_init(dev);
intel_pch_pll_init(dev);
intel_shared_dpll_init(dev);
 
/* Just disable it once at startup */
i915_disable_vga(dev);
9186,6 → 10235,17
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
 
/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
* paranoid "someone might have enabled VGA while we were not looking"
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if (HAS_POWER_WELL(dev) &&
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
return;
 
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
9192,57 → 10252,18
}
}
 
/* Scan out the current hw modeset state, sanitize it and map it into the
* drm and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore)
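/* Read the current pipe, shared DPLL, encoder, clock and connector state
* back from the hardware into the software tracking structures. */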
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 tmp;
struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
int i;
 
if (HAS_DDI(dev)) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
if (tmp & TRANS_DDI_FUNC_ENABLE) {
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
pipe = PIPE_C;
break;
default:
/* A bogus value has been programmed, disable
* the transcoder */
WARN(1, "Bogus eDP source %08x\n", tmp);
intel_ddi_disable_transcoder_func(dev_priv,
TRANSCODER_EDP);
goto setup_pipes;
}
 
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
crtc->config.cpu_transcoder = TRANSCODER_EDP;
 
DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
pipe_name(pipe));
}
}
 
setup_pipes:
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
enum transcoder tmp = crtc->config.cpu_transcoder;
memset(&crtc->config, 0, sizeof(crtc->config));
crtc->config.cpu_transcoder = tmp;
 
crtc->active = dev_priv->display.get_pipe_config(crtc,
&crtc->config);
9254,16 → 10275,35
crtc->active ? "enabled" : "disabled");
}
 
/* FIXME: Smash this into the new shared dpll infrastructure. */
if (HAS_DDI(dev))
intel_ddi_setup_hw_pll_state(dev);
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
pll->active = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
pll->active++;
}
pll->refcount = pll->active;
 
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on);
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
pipe = 0;
 
if (encoder->get_hw_state(encoder, &pipe)) {
encoder->base.crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
if (encoder->get_config)
encoder->get_config(encoder, &crtc->config);
} else {
encoder->base.crtc = NULL;
}
9276,6 → 10316,15
pipe);
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (!crtc->active)
continue;
if (dev_priv->display.get_clock)
dev_priv->display.get_clock(crtc,
&crtc->config);
}
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->get_hw_state(connector)) {
9291,7 → 10340,38
drm_get_connector_name(&connector->base),
connector->base.encoder ? "enabled" : "disabled");
}
}
 
/* Scan out the current hw modeset state, sanitize it and map it into the
* drm and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
int i;
 
intel_modeset_readout_hw_state(dev);
 
/*
* Now that we have the config, copy it to each CRTC struct
* Note that this could go away if we move to using crtc_config
* checking everywhere.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (crtc->active && i915_fastboot) {
intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
 
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
crtc->base.base.id);
drm_mode_debug_printmodeline(&crtc->base.mode);
}
}
 
/* HW state is read out, now we need to sanitize this mess. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
9301,8 → 10381,21
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_sanitize_crtc(crtc);
intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
}
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
if (!pll->on || pll->active)
continue;
 
DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
 
pll->disable(dev_priv, pll);
pll->on = false;
}
 
if (force_restore) {
/*
* We need to use raw interfaces for restoring state to avoid
9342,20 → 10435,29
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
 
/*
* Disable interrupts and polling as the first thing to avoid creating havoc.
* Too much stuff here (turning off rps, connectors, ...) would
* experience fancy races otherwise.
*/
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
// drm_kms_helper_poll_fini(dev);
 
mutex_lock(&dev->struct_mutex);
 
// intel_unregister_dsm_handler();
 
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
continue;
 
intel_crtc = to_intel_crtc(crtc);
intel_increase_pllclock(crtc);
}
 
9365,14 → 10467,10
 
ironlake_teardown_rc6(dev);
 
if (IS_VALLEYVIEW(dev))
vlv_init_dpio(dev);
 
mutex_unlock(&dev->struct_mutex);
 
/* Disable the irq before mode object teardown, for the irq might
* enqueue unpin/hotplug work. */
// drm_irq_uninstall(dev);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
// cancel_work_sync(&dev_priv->hotplug_work);
// cancel_work_sync(&dev_priv->rps.work);
 
9420,6 → 10518,11
#include <linux/seq_file.h>
 
struct intel_display_error_state {
 
u32 power_well_driver;
 
int num_transcoders;
 
struct intel_cursor_error_state {
u32 control;
u32 position;
9428,15 → 10531,7
} cursor[I915_MAX_PIPES];
 
struct intel_pipe_error_state {
u32 conf;
u32 source;
 
u32 htotal;
u32 hblank;
u32 hsync;
u32 vtotal;
u32 vblank;
u32 vsync;
} pipe[I915_MAX_PIPES];
 
struct intel_plane_error_state {
9448,6 → 10543,19
u32 surface;
u32 tile_offset;
} plane[I915_MAX_PIPES];
 
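/* Per-transcoder timing and configuration registers; on DDI platforms
* the eDP transcoder is captured in addition to the per-pipe ones. */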
struct intel_transcoder_error_state {
enum transcoder cpu_transcoder;
 
u32 conf;
 
u32 htotal;
u32 hblank;
u32 hsync;
u32 vtotal;
u32 vblank;
u32 vsync;
} transcoder[4];
};
 
struct intel_display_error_state *
9455,16 → 10563,25
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
enum transcoder cpu_transcoder;
int transcoders[] = {
TRANSCODER_A,
TRANSCODER_B,
TRANSCODER_C,
TRANSCODER_EDP,
};
int i;
 
if (INTEL_INFO(dev)->num_pipes == 0)
return NULL;
 
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
 
if (HAS_POWER_WELL(dev))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
for_each_pipe(i) {
cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
 
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
9488,56 → 10605,86
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
 
error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
 
error->num_transcoders = INTEL_INFO(dev)->num_pipes;
if (HAS_DDI(dev_priv->dev))
error->num_transcoders++; /* Account for eDP. */
 
for (i = 0; i < error->num_transcoders; i++) {
enum transcoder cpu_transcoder = transcoders[i];
 
error->transcoder[i].cpu_transcoder = cpu_transcoder;
 
error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
 
/* In the code above we read the registers without checking if the power
* well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
* prevent the next I915_WRITE from detecting it and printing an error
* message. */
intel_uncore_clear_errors(dev);
 
return error;
}
 
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
 
void
intel_display_print_error_state(struct seq_file *m,
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
int i;
 
seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (!error)
return;
 
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (HAS_POWER_WELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
seq_printf(m, "Pipe [%d]:\n", i);
seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
err_printf(m, "Pipe [%d]:\n", i);
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
 
seq_printf(m, "Plane [%d]:\n", i);
seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
if (INTEL_INFO(dev)->gen <= 3) {
seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
seq_printf(m, " POS: %08x\n", error->plane[i].pos);
err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
}
if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
if (INTEL_INFO(dev)->gen >= 4) {
seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
err_printf(m, " SURF: %08x\n", error->plane[i].surface);
err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
 
seq_printf(m, "Cursor [%d]:\n", i);
seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
seq_printf(m, " POS: %08x\n", error->cursor[i].position);
seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
err_printf(m, "Cursor [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
err_printf(m, " POS: %08x\n", error->cursor[i].position);
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
 
for (i = 0; i < error->num_transcoders; i++) {
err_printf(m, " CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
#endif