Subversion Repositories Kolibri OS

Compare Revisions

Rev 6936 → Rev 6937

/drivers/video/drm/i915/intel_pm.c
71,6 → 71,14
*/
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
 
/*
* Wa: Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
 
static void i915_pineview_get_mem_freq(struct drm_device *dev)
288,7 → 296,7
struct drm_device *dev = dev_priv->dev;
u32 val;
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
POSTING_READ(FW_BLC_SELF_VLV);
dev_priv->wm.vlv.cxsr = enable;
1713,13 → 1721,6
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
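
(Aside on the expression above, which computes the FBC watermark, in lines, from the primary watermark value: it converts pri_val * 64 bytes into display lines by dividing by the line payload horiz_pixels * bytes_per_pixel and adds two lines of margin. With purely illustrative numbers — pri_val = 120, horiz_pixels = 1920, bytes_per_pixel = 4 — the line payload is 7680 bytes, DIV_ROUND_UP(120 * 64, 7680) = 1, and the result is 1 + 2 = 3 lines.)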
 
struct skl_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
uint32_t pixel_rate; /* in KHz */
struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
};
 
struct ilk_wm_maximums {
uint16_t pri;
uint16_t spr;
1727,13 → 1728,6
uint16_t fbc;
};
 
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
bool sprites_enabled;
bool sprites_scaled;
};
 
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
1988,9 → 1982,11
const struct intel_crtc *intel_crtc,
int level,
struct intel_crtc_state *cstate,
struct intel_plane_state *pristate,
struct intel_plane_state *sprstate,
struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
struct intel_plane *intel_plane;
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
uint16_t spr_latency = dev_priv->wm.spr_latency[level];
uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2002,29 → 1998,11
cur_latency *= 5;
}
 
for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) {
struct intel_plane_state *pstate =
to_intel_plane_state(intel_plane->base.state);
 
switch (intel_plane->base.type) {
case DRM_PLANE_TYPE_PRIMARY:
result->pri_val = ilk_compute_pri_wm(cstate, pstate,
pri_latency,
level);
result->fbc_val = ilk_compute_fbc_wm(cstate, pstate,
result->pri_val);
break;
case DRM_PLANE_TYPE_OVERLAY:
result->spr_val = ilk_compute_spr_wm(cstate, pstate,
spr_latency);
break;
case DRM_PLANE_TYPE_CURSOR:
result->cur_val = ilk_compute_cur_wm(cstate, pstate,
cur_latency);
break;
}
}
 
result->pri_val = ilk_compute_pri_wm(cstate, pristate,
pri_latency, level);
result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
result->enable = true;
}
 
2102,34 → 2080,32
GEN9_MEM_LATENCY_LEVEL_MASK;
 
/*
* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
* need to be disabled. We make sure to sanitize the values out
* of the punit to satisfy this requirement.
*/
for (level = 1; level <= max_level; level++) {
if (wm[level] == 0) {
for (i = level + 1; i <= max_level; i++)
wm[i] = 0;
break;
}
}
 
/*
* WaWmMemoryReadLatency:skl
*
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from the
* punit when level 0 response data is 0us.
* to add 2us to the various latency levels we retrieve from
* the punit.
* - W0 is a bit special in that it's the only level that
* can't be disabled if we want to have display working, so
* we always add 2us there.
* - For levels >=1, punit returns 0us latency when they are
* disabled, so we respect that and don't add 2us then
*
* Additionally, if a level n (n > 1) has a 0us latency, all
* levels m (m >= n) need to be disabled. We make sure to
* sanitize the values out of the punit to satisfy this
* requirement.
*/
if (wm[0] == 0) {
wm[0] += 2;
for (level = 1; level <= max_level; level++) {
if (wm[level] == 0)
for (level = 1; level <= max_level; level++)
if (wm[level] != 0)
wm[level] += 2;
else {
for (i = level + 1; i <= max_level; i++)
wm[i] = 0;
 
break;
wm[level] += 2;
}
}
 
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
2285,34 → 2261,19
intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
 
static void ilk_compute_wm_config(struct drm_device *dev,
struct intel_wm_config *config)
{
struct intel_crtc *intel_crtc;
 
/* Compute the currently _active_ config */
for_each_intel_crtc(dev, intel_crtc) {
const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
 
if (!wm->pipe_enabled)
continue;
 
config->sprites_enabled |= wm->sprites_enabled;
config->sprites_scaled |= wm->sprites_scaled;
config->num_pipes_active++;
}
}
 
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
struct intel_pipe_wm *pipe_wm)
static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = intel_crtc->base.dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = NULL;
struct intel_plane *intel_plane;
struct drm_plane_state *ps;
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
struct intel_plane_state *curstate = NULL;
int level, max_level = ilk_wm_max_level(dev);
/* LP0 watermark maximums depend on this pipe alone */
struct intel_wm_config config = {
2320,12 → 2281,26
};
struct ilk_wm_maximums max;
 
cstate = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(cstate))
return PTR_ERR(cstate);
 
pipe_wm = &cstate->wm.optimal.ilk;
memset(pipe_wm, 0, sizeof(*pipe_wm));
 
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) {
sprstate = to_intel_plane_state(intel_plane->base.state);
break;
ps = drm_atomic_get_plane_state(state,
&intel_plane->base);
if (IS_ERR(ps))
return PTR_ERR(ps);
 
if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
pristate = to_intel_plane_state(ps);
else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
sprstate = to_intel_plane_state(ps);
else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
curstate = to_intel_plane_state(ps);
}
}
 
config.sprites_enabled = sprstate->visible;
config.sprites_scaled = sprstate->visible &&
2333,7 → 2308,7
drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
 
pipe_wm->pipe_enabled = cstate->base.active;
pipe_wm->sprites_enabled = sprstate->visible;
pipe_wm->sprites_enabled = config.sprites_enabled;
pipe_wm->sprites_scaled = config.sprites_scaled;
 
/* ILK/SNB: LP2+ watermarks only w/o sprites */
2344,10 → 2319,12
if (config.sprites_scaled)
max_level = 0;
 
ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]);
ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
pipe_wm->linetime = hsw_compute_linetime_wm(dev,
&intel_crtc->base);
 
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2354,7 → 2331,7
 
/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
return false;
return -EINVAL;
 
ilk_compute_wm_reg_maximums(dev, 1, &max);
 
2361,7 → 2338,8
for (level = 1; level <= max_level; level++) {
struct intel_wm_level wm = {};
 
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm);
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
pristate, sprstate, curstate, &wm);
 
/*
* Disable any watermark level that exceeds the
2374,7 → 2352,7
pipe_wm->wm[level] = wm;
}
 
return true;
return 0;
}
 
/*
2389,7 → 2367,9
ret_wm->enable = true;
 
for_each_intel_crtc(dev, intel_crtc) {
const struct intel_pipe_wm *active = &intel_crtc->wm.active;
const struct intel_crtc_state *cstate =
to_intel_crtc_state(intel_crtc->base.state);
const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
const struct intel_wm_level *wm = &active->wm[level];
 
if (!active->pipe_enabled)
2460,7 → 2440,7
* enabled sometime later.
*/
if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
intel_fbc_enabled(dev_priv)) {
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
 
2537,14 → 2517,15
 
/* LP0 register values */
for_each_intel_crtc(dev, intel_crtc) {
const struct intel_crtc_state *cstate =
to_intel_crtc_state(intel_crtc->base.state);
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r =
&intel_crtc->wm.active.wm[0];
const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
 
if (WARN_ON(!r->enable))
continue;
 
results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
 
results->wm_pipe[pipe] =
(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2766,18 → 2747,40
#define SKL_DDB_SIZE 896 /* in blocks */
#define BXT_DDB_SIZE 512
 
/*
* Return the index of a plane in the SKL DDB and wm result arrays. Primary
* plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
* other universal planes are in indices 1..n. Note that this may leave unused
* indices between the top "sprite" plane and the cursor.
*/
static int
skl_wm_plane_id(const struct intel_plane *plane)
{
switch (plane->base.type) {
case DRM_PLANE_TYPE_PRIMARY:
return 0;
case DRM_PLANE_TYPE_CURSOR:
return PLANE_CURSOR;
case DRM_PLANE_TYPE_OVERLAY:
return plane->plane + 1;
default:
MISSING_CASE(plane->base.type);
return plane->plane;
}
}
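
To make the slot mapping described above concrete, here is a small self-contained userspace sketch of the same rule (hypothetical demo_* names, I915_MAX_PLANES assumed to be 4, one primary, two sprites and a cursor on the pipe); it illustrates skl_wm_plane_id() and is not driver code:

#include <stdio.h>

/* Hypothetical stand-ins for the driver types; illustration only. */
enum demo_plane_type { DEMO_PRIMARY, DEMO_OVERLAY, DEMO_CURSOR };

struct demo_plane {
	enum demo_plane_type type;
	int plane;              /* sprite index, as in intel_plane->plane */
	const char *name;
};

#define DEMO_MAX_PLANES 4       /* assumed value of I915_MAX_PLANES */

/* Same rule as skl_wm_plane_id(): primary -> 0, sprites -> 1..n,
 * cursor -> the last slot (DEMO_MAX_PLANES - 1). */
static int demo_wm_plane_id(const struct demo_plane *p)
{
	switch (p->type) {
	case DEMO_PRIMARY:
		return 0;
	case DEMO_CURSOR:
		return DEMO_MAX_PLANES - 1;
	case DEMO_OVERLAY:
		return p->plane + 1;
	}
	return -1;
}

int main(void)
{
	const struct demo_plane planes[] = {
		{ DEMO_PRIMARY, 0, "primary"  },
		{ DEMO_OVERLAY, 0, "sprite 0" },
		{ DEMO_OVERLAY, 1, "sprite 1" },
		{ DEMO_CURSOR,  0, "cursor"   },
	};
	unsigned int i;

	for (i = 0; i < sizeof(planes) / sizeof(planes[0]); i++)
		printf("%-8s -> DDB/WM slot %d\n", planes[i].name,
		       demo_wm_plane_id(&planes[i]));
	return 0;
}

With four planes this prints slots 0, 1, 2 and 3; with fewer sprites, the unused gap mentioned in the comment appears between the last sprite slot and the cursor slot.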
 
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
struct drm_crtc *for_crtc,
const struct intel_crtc_state *cstate,
const struct intel_wm_config *config,
const struct skl_pipe_wm_parameters *params,
struct skl_ddb_entry *alloc /* out */)
{
struct drm_crtc *for_crtc = cstate->base.crtc;
struct drm_crtc *crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
 
if (!params->active) {
if (!cstate->base.active) {
alloc->start = 0;
alloc->end = 0;
return;
2832,7 → 2835,10
memset(ddb, 0, sizeof(*ddb));
 
for_each_pipe(dev_priv, pipe) {
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
enum intel_display_power_domain power_domain;
 
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
 
for_each_plane(dev_priv, pipe, plane) {
2844,23 → 2850,35
val = I915_READ(CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
val);
 
intel_display_power_put(dev_priv, power_domain);
}
}
 
static unsigned int
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
int y)
{
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct drm_framebuffer *fb = pstate->fb;
 
/* for planar format */
if (p->y_bytes_per_pixel) {
if (fb->pixel_format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
return intel_crtc->config->pipe_src_w *
intel_crtc->config->pipe_src_h *
drm_format_plane_cpp(fb->pixel_format, 0);
else /* uv-plane data rate */
return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
return (intel_crtc->config->pipe_src_w/2) *
(intel_crtc->config->pipe_src_h/2) *
drm_format_plane_cpp(fb->pixel_format, 1);
}
 
/* for packed formats */
return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
return intel_crtc->config->pipe_src_w *
intel_crtc->config->pipe_src_h *
drm_format_plane_cpp(fb->pixel_format, 0);
}
 
/*
2869,37 → 2887,47
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
const struct skl_pipe_wm_parameters *params)
skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
{
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct drm_device *dev = intel_crtc->base.dev;
const struct intel_plane *intel_plane;
unsigned int total_data_rate = 0;
int plane;
 
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
const struct intel_plane_wm_parameters *p;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
const struct drm_plane_state *pstate = intel_plane->base.state;
 
p = &params->plane[plane];
if (!p->enabled)
if (pstate->fb == NULL)
continue;
 
total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
if (p->y_bytes_per_pixel) {
total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
continue;
 
/* packed/uv */
total_data_rate += skl_plane_relative_data_rate(cstate,
pstate,
0);
 
if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
/* y-plane */
total_data_rate += skl_plane_relative_data_rate(cstate,
pstate,
1);
}
}
 
return total_data_rate;
}
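
For a sense of the magnitudes behind the overflow comment above (illustrative numbers, not from this change): a 3840x2160 primary plane at 4 bytes per pixel contributes 3840 * 2160 * 4 = 33,177,600; a 1920x1080 NV12 sprite contributes 1920 * 1080 * 1 = 2,073,600 for its y-plane plus (1920/2) * (1080/2) * 2 = 1,036,800 for its uv-plane. Even the bound the comment allows for, 3 * 4096 * 8192 * 4 = 402,653,184, stays well under 2^32 = 4,294,967,296, so the per-plane rates can safely be summed in an unsigned int.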
 
static void
skl_allocate_pipe_ddb(struct drm_crtc *crtc,
const struct intel_wm_config *config,
const struct skl_pipe_wm_parameters *params,
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_wm_config *config = &dev_priv->wm.config;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane;
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
2906,9 → 2934,8
uint16_t minimum[I915_MAX_PLANES];
uint16_t y_minimum[I915_MAX_PLANES];
unsigned int total_data_rate;
int plane;
 
skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0) {
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
2925,17 → 2952,20
alloc->end -= cursor_blocks;
 
/* 1. Allocate the minimum required blocks for each active plane */
for_each_plane(dev_priv, pipe, plane) {
const struct intel_plane_wm_parameters *p;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
int id = skl_wm_plane_id(intel_plane);
 
p = &params->plane[plane];
if (!p->enabled)
if (fb == NULL)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
 
minimum[plane] = 8;
alloc_size -= minimum[plane];
y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
alloc_size -= y_minimum[plane];
minimum[id] = 8;
alloc_size -= minimum[id];
y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
alloc_size -= y_minimum[id];
}
 
/*
2944,19 → 2974,22
*
* FIXME: we may not allocate every single block here.
*/
total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
total_data_rate = skl_get_total_relative_data_rate(cstate);
 
start = alloc->start;
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
const struct intel_plane_wm_parameters *p;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane *plane = &intel_plane->base;
struct drm_plane_state *pstate = intel_plane->base.state;
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
 
p = &params->plane[plane];
if (!p->enabled)
if (pstate->fb == NULL)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
 
data_rate = skl_plane_relative_data_rate(p, 0);
data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
 
/*
* allocation for (packed formats) or (uv-plane part of planar format):
2963,12 → 2996,12
* promote the expression to 64 bits to avoid overflowing, the
* result is < available as data_rate / total_data_rate < 1
*/
plane_blocks = minimum[plane];
plane_blocks = minimum[id];
plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
 
ddb->plane[pipe][plane].start = start;
ddb->plane[pipe][plane].end = start + plane_blocks;
ddb->plane[pipe][id].start = start;
ddb->plane[pipe][id].end = start + plane_blocks;
 
start += plane_blocks;
 
2975,14 → 3008,16
/*
* allocation for y_plane part of planar format:
*/
if (p->y_bytes_per_pixel) {
y_data_rate = skl_plane_relative_data_rate(p, 1);
y_plane_blocks = y_minimum[plane];
if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
y_data_rate = skl_plane_relative_data_rate(cstate,
pstate,
1);
y_plane_blocks = y_minimum[id];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
total_data_rate);
 
ddb->y_plane[pipe][plane].start = start;
ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
ddb->y_plane[pipe][id].start = start;
ddb->y_plane[pipe][id].end = start + y_plane_blocks;
 
start += y_plane_blocks;
}
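
A worked example of the proportional split above (hypothetical round numbers): one active pipe owning the full 896-block SKL DDB, with an assumed 32-block cursor reservation and two enabled planes — a 1920x1080 primary at 4 Bpp (data rate 8,294,400) and a 1920x1080 packed sprite at 2 Bpp (data rate 4,147,200). After the two 8-block minimums, alloc_size = 896 - 32 - 16 = 848. The primary ends up with 8 + 848 * 8294400 / 12441600 = 8 + 565 = 573 blocks and the sprite with 8 + 848 * 4147200 / 12441600 = 8 + 282 = 290 blocks; the truncation in div_u64 leaves one block unallocated, which is the "we may not allocate every single block" case noted in the FIXME above.
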
3052,104 → 3087,27
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
enum pipe pipe = intel_crtc->pipe;
 
if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
sizeof(new_ddb->plane[pipe])))
/*
* If ddb allocation of pipes changed, it may require recalculation of
* watermarks
*/
if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
return true;
 
if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR],
sizeof(new_ddb->plane[pipe][PLANE_CURSOR])))
return true;
 
return false;
}
 
static void skl_compute_wm_global_parameters(struct drm_device *dev,
struct intel_wm_config *config)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
config->num_pipes_active += to_intel_crtc(crtc)->active;
 
/* FIXME: I don't think we need those two global parameters on SKL */
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
 
config->sprites_enabled |= intel_plane->wm.enabled;
config->sprites_scaled |= intel_plane->wm.scaled;
}
}
 
static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
struct skl_pipe_wm_parameters *p)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
struct drm_framebuffer *fb;
int i = 1; /* Index for sprite planes start */
 
p->active = intel_crtc->active;
if (p->active) {
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
 
fb = crtc->primary->state->fb;
/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
if (fb) {
p->plane[0].enabled = true;
p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
drm_format_plane_cpp(fb->pixel_format, 0) : 0;
p->plane[0].tiling = fb->modifier[0];
} else {
p->plane[0].enabled = false;
p->plane[0].bytes_per_pixel = 0;
p->plane[0].y_bytes_per_pixel = 0;
p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
}
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
p->plane[0].rotation = crtc->primary->state->rotation;
 
fb = crtc->cursor->state->fb;
p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0;
if (fb) {
p->plane[PLANE_CURSOR].enabled = true;
p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8;
p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w;
p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h;
} else {
p->plane[PLANE_CURSOR].enabled = false;
p->plane[PLANE_CURSOR].bytes_per_pixel = 0;
p->plane[PLANE_CURSOR].horiz_pixels = 64;
p->plane[PLANE_CURSOR].vert_pixels = 64;
}
}
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
 
if (intel_plane->pipe == pipe &&
plane->type == DRM_PLANE_TYPE_OVERLAY)
p->plane[i++] = intel_plane->wm;
}
}
 
static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
struct skl_pipe_wm_parameters *p,
struct intel_plane_wm_parameters *p_params,
struct intel_crtc_state *cstate,
struct intel_plane *intel_plane,
uint16_t ddb_allocation,
int level,
uint16_t *out_blocks, /* out */
uint8_t *out_lines /* out */)
{
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
3157,31 → 3115,33
uint32_t selected_result;
uint8_t bytes_per_pixel;
 
if (latency == 0 || !p->active || !p_params->enabled)
if (latency == 0 || !cstate->base.active || !fb)
return false;
 
bytes_per_pixel = p_params->y_bytes_per_pixel ?
p_params->y_bytes_per_pixel :
p_params->bytes_per_pixel;
method1 = skl_wm_method1(p->pixel_rate,
bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
bytes_per_pixel,
latency);
method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal,
p_params->horiz_pixels,
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
cstate->pipe_src_w,
bytes_per_pixel,
p_params->tiling,
fb->modifier[0],
latency);
 
plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
uint32_t min_scanlines = 4;
uint32_t y_tile_minimum;
if (intel_rotation_90_or_270(p_params->rotation)) {
switch (p_params->bytes_per_pixel) {
if (intel_rotation_90_or_270(plane->state->rotation)) {
int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
 
switch (bpp) {
case 1:
min_scanlines = 16;
break;
3205,8 → 3165,8
res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
 
if (level >= 1 && level <= 7) {
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
res_lines += 4;
else
res_blocks++;
3223,84 → 3183,80
 
static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb,
struct skl_pipe_wm_parameters *p,
enum pipe pipe,
struct intel_crtc_state *cstate,
int level,
int num_planes,
struct skl_wm_level *result)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct intel_plane *intel_plane;
uint16_t ddb_blocks;
int i;
enum pipe pipe = intel_crtc->pipe;
 
for (i = 0; i < num_planes; i++) {
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
int i = skl_wm_plane_id(intel_plane);
 
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
 
result->plane_en[i] = skl_compute_plane_wm(dev_priv,
p, &p->plane[i],
cstate,
intel_plane,
ddb_blocks,
level,
&result->plane_res_b[i],
&result->plane_res_l[i]);
}
 
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]);
result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p,
&p->plane[PLANE_CURSOR],
ddb_blocks, level,
&result->plane_res_b[PLANE_CURSOR],
&result->plane_res_l[PLANE_CURSOR]);
}
 
static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
if (!to_intel_crtc(crtc)->active)
if (!cstate->base.active)
return 0;
 
if (WARN_ON(p->pixel_rate == 0))
if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
return 0;
 
return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
skl_pipe_pixel_rate(cstate));
}
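
As a quick sanity check on the linetime expression above (hypothetical 1080p-like timing, not from this change): with crtc_htotal = 2200 and a pixel rate of 148,500 kHz, 8 * 2200 * 1000 / 148500 ≈ 118.5, so DIV_ROUND_UP returns 119 — the roughly 14.8 us needed to scan out one line, expressed in 1/8 us units.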
 
static void skl_compute_transition_wm(struct drm_crtc *crtc,
struct skl_pipe_wm_parameters *params,
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
struct skl_wm_level *trans_wm /* out */)
{
struct drm_crtc *crtc = cstate->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i;
struct intel_plane *intel_plane;
 
if (!params->active)
if (!cstate->base.active)
return;
 
/* Until we know more, just disable transition WMs */
for (i = 0; i < intel_num_planes(intel_crtc); i++)
for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
int i = skl_wm_plane_id(intel_plane);
 
trans_wm->plane_en[i] = false;
trans_wm->plane_en[PLANE_CURSOR] = false;
}
}
 
static void skl_compute_pipe_wm(struct drm_crtc *crtc,
static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb,
struct skl_pipe_wm_parameters *params,
struct skl_pipe_wm *pipe_wm)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int level, max_level = ilk_wm_max_level(dev);
 
for (level = 0; level <= max_level; level++) {
skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
level, intel_num_planes(intel_crtc),
&pipe_wm->wm[level]);
skl_compute_wm_level(dev_priv, ddb, cstate,
level, &pipe_wm->wm[level]);
}
pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
 
skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
}
 
static void skl_compute_wm_results(struct drm_device *dev,
struct skl_pipe_wm_parameters *p,
struct skl_pipe_wm *p_wm,
struct skl_wm_values *r,
struct intel_crtc *intel_crtc)
3357,7 → 3313,8
r->wm_linetime[pipe] = p_wm->linetime;
}
 
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
i915_reg_t reg,
const struct skl_ddb_entry *entry)
{
if (entry->end)
3372,7 → 3329,7
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
for_each_intel_crtc(dev, crtc) {
int i, level, max_level = ilk_wm_max_level(dev);
enum pipe pipe = crtc->pipe;
 
3544,21 → 3501,19
}
 
static bool skl_update_pipe_wm(struct drm_crtc *crtc,
struct skl_pipe_wm_parameters *params,
struct intel_wm_config *config,
struct skl_ddb_allocation *ddb, /* out */
struct skl_pipe_wm *pipe_wm /* out */)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 
skl_compute_wm_pipe_parameters(crtc, params);
skl_allocate_pipe_ddb(crtc, config, params, ddb);
skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
skl_allocate_pipe_ddb(cstate, ddb);
skl_compute_pipe_wm(cstate, ddb, pipe_wm);
 
if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
return false;
 
intel_crtc->wm.skl_active = *pipe_wm;
intel_crtc->wm.active.skl = *pipe_wm;
 
return true;
}
3565,7 → 3520,6
 
static void skl_update_other_pipe_wm(struct drm_device *dev,
struct drm_crtc *crtc,
struct intel_wm_config *config,
struct skl_wm_values *r)
{
struct intel_crtc *intel_crtc;
3584,9 → 3538,7
* Otherwise, because of this_crtc being freshly enabled/disabled, the
* other active pipes need new DDB allocation and WM values.
*/
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
struct skl_pipe_wm_parameters params = {};
for_each_intel_crtc(dev, intel_crtc) {
struct skl_pipe_wm pipe_wm = {};
bool wm_changed;
 
3597,7 → 3549,6
continue;
 
wm_changed = skl_update_pipe_wm(&intel_crtc->base,
&params, config,
&r->ddb, &pipe_wm);
 
/*
3607,7 → 3558,7
*/
WARN_ON(!wm_changed);
 
skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
r->dirty[intel_crtc->pipe] = true;
}
}
3637,10 → 3588,9
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_pipe_wm_parameters params = {};
struct skl_wm_values *results = &dev_priv->wm.skl_results;
struct skl_pipe_wm pipe_wm = {};
struct intel_wm_config config = {};
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
 
 
/* Clear all dirty flags */
3648,16 → 3598,13
 
skl_clear_wm(results, intel_crtc->pipe);
 
skl_compute_wm_global_parameters(dev, &config);
 
if (!skl_update_pipe_wm(crtc, &params, &config,
&results->ddb, &pipe_wm))
if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
return;
 
skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
results->dirty[intel_crtc->pipe] = true;
 
skl_update_other_pipe_wm(dev, crtc, &config, results);
skl_update_other_pipe_wm(dev, crtc, results);
skl_write_wm_values(dev_priv, results);
skl_flush_wm_values(dev_priv, results);
 
3665,61 → 3612,33
dev_priv->wm.skl_hw = *results;
}
 
static void
skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enabled, bool scaled)
static void ilk_compute_wm_config(struct drm_device *dev,
struct intel_wm_config *config)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane->state->fb;
struct intel_crtc *crtc;
 
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
/* Compute the currently _active_ config */
for_each_intel_crtc(dev, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
 
/* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
intel_plane->wm.bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
intel_plane->wm.y_bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
if (!wm->pipe_enabled)
continue;
 
/*
* Framebuffer can be NULL on plane disable, but it does not
* matter for watermarks if we assume no tiling in that case.
*/
if (fb)
intel_plane->wm.tiling = fb->modifier[0];
intel_plane->wm.rotation = plane->state->rotation;
 
skl_update_wm(crtc);
config->sprites_enabled |= wm->sprites_enabled;
config->sprites_scaled |= wm->sprites_scaled;
config->num_pipes_active++;
}
}
 
static void ilk_update_wm(struct drm_crtc *crtc)
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_device *dev = dev_priv->dev;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
struct intel_pipe_wm pipe_wm = {};
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct intel_wm_config config = {};
 
WARN_ON(cstate->base.active != intel_crtc->active);
 
intel_compute_pipe_wm(cstate, &pipe_wm);
 
if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
return;
 
intel_crtc->wm.active = pipe_wm;
 
ilk_compute_wm_config(dev, &config);
 
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3744,15 → 3663,14
ilk_write_wm_values(dev_priv, &results);
}
 
static void
ilk_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enabled, bool scaled)
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 
WARN_ON(cstate->base.active != intel_crtc->active);
 
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
3760,10 → 3678,14
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
intel_wait_for_vblank(dev, intel_plane->pipe);
if (cstate->disable_lp_wm) {
ilk_disable_lp_wm(crtc->dev);
intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
}
 
ilk_update_wm(crtc);
intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
 
ilk_program_watermarks(dev_priv);
}
 
static void skl_pipe_wm_active_state(uint32_t val,
3816,7 → 3738,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
enum pipe pipe = intel_crtc->pipe;
int level, i, max_level;
uint32_t temp;
3860,6 → 3783,8
 
temp = hw->plane_trans[pipe][PLANE_CURSOR];
skl_pipe_wm_active_state(temp, active, true, true, i, 0);
 
intel_crtc->wm.active.skl = *active;
}
 
void skl_wm_get_hw_state(struct drm_device *dev)
3879,9 → 3804,10
struct drm_i915_private *dev_priv = dev->dev_private;
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_pipe_wm *active = &intel_crtc->wm.active;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
enum pipe pipe = intel_crtc->pipe;
static const unsigned int wm0_pipe_reg[] = {
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
[PIPE_B] = WM0_PIPEB_ILK,
[PIPE_C] = WM0_PIPEC_IVB,
3920,6 → 3846,8
for (level = 0; level <= max_level; level++)
active->wm[level].enable = true;
}
 
intel_crtc->wm.active.ilk = *active;
}
 
#define _FW_WM(value, plane) \
4145,21 → 4073,6
dev_priv->display.update_wm(crtc);
}
 
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled)
{
struct drm_i915_private *dev_priv = plane->dev->dev_private;
 
if (dev_priv->display.update_sprite_wm)
dev_priv->display.update_sprite_wm(plane, crtc,
sprite_width, sprite_height,
pixel_size, enabled, scaled);
}
 
/**
* Lock protecting IPS related data structures
*/
4381,12 → 4294,6
break;
}
 
/* When byt can survive without system hang with dynamic
* sw freq adjustments, this restriction can be lifted.
*/
if (IS_VALLEYVIEW(dev_priv))
goto skip_hw_write;
 
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
4405,7 → 4312,6
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
 
skip_hw_write:
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
4416,9 → 4322,8
{
u32 mask = 0;
 
/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
if (val > dev_priv->rps.min_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
4435,7 → 4340,7
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
return;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4522,7 → 4427,7
{
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4536,13 → 4441,12
 
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
4590,7 → 4494,7
 
void intel_set_rps(struct drm_device *dev, u8 val)
{
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
valleyview_set_rps(dev, val);
else
gen6_set_rps(dev, val);
4634,7 → 4538,7
 
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
mode = GEN6_RC_CTL_RC6_ENABLE;
else
4711,7 → 4615,8
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
4723,7 → 4628,7
dev_priv->rps.max_freq);
}
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/* Store the frequency values in 16.66 MHz units, which is
the natural hardware unit for SKL */
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
4760,7 → 4665,7
gen6_init_rps_frequencies(dev);
 
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
4828,8 → 4733,8
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off");
/* WaRsUseTimeoutMode */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) {
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
5072,7 → 4977,7
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5090,7 → 4995,7
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
5225,7 → 5130,17
 
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
u32 val;
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
/*
* According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
* for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
* a BYT-M B0 the above register contains 0xbf. Moreover when setting
* a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
* to make sure it matches what Punit accepts.
*/
return max_t(u32, val, 0xc0);
}
 
/* Check that the pctx buffer wasn't moved under us. */
6130,7 → 6045,17
 
void intel_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
/*
* RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
* requirement.
*/
if (!i915.enable_rc6) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
intel_runtime_pm_get(dev_priv);
}
 
if (IS_CHERRYVIEW(dev))
cherryview_init_gt_powersave(dev);
6140,10 → 6065,15
 
void intel_cleanup_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_CHERRYVIEW(dev))
return;
else if (IS_VALLEYVIEW(dev))
valleyview_cleanup_gt_powersave(dev);
 
if (!i915.enable_rc6)
intel_runtime_pm_put(dev_priv);
}
 
static void gen6_suspend_rps(struct drm_device *dev)
6218,7 → 6148,7
} else if (INTEL_INFO(dev)->gen >= 9) {
gen9_enable_rc6(dev);
gen9_enable_rps(dev);
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
__gen6_update_ring_freq(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
6811,20 → 6741,9
 
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
u32 val;
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
/*
* On driver load, a pipe may be active and driving a DSI display.
* Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
val = I915_READ(DSPCLK_GATE_D);
val &= DPOUNIT_CLOCK_GATE_DISABLE;
val |= VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
 
/*
* Disable trickle feed and enable pnd deadline calculation
*/
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
7091,7 → 7010,6
dev_priv->display.init_clock_gating =
bxt_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
 
7100,7 → 7018,7
(!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
7365,4 → 7283,6
INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
 
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
atomic_set(&dev_priv->pm.atomic_seq, 0);
}