Subversion Repositories Kolibri OS

Compare Revisions

Rev 4103 → Rev 4104

/drivers/video/drm/drm_cache.c
0,0 → 1,152
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
 
#include <linux/export.h>
#include <drm/drmP.h>
 
extern int x86_clflush_size;
 
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
#if 0
static void
drm_clflush_page(struct page *page)
{
uint8_t *page_virtual;
unsigned int i;
const int size = boot_cpu_data.x86_clflush_size;
 
if (unlikely(page == NULL))
return;
 
page_virtual = kmap_atomic(page);
for (i = 0; i < PAGE_SIZE; i += size)
clflush(page_virtual + i);
kunmap_atomic(page_virtual);
}
 
static void drm_cache_flush_clflush(struct page *pages[],
unsigned long num_pages)
{
unsigned long i;
 
mb();
for (i = 0; i < num_pages; i++)
drm_clflush_page(*pages++);
mb();
}
 
static void
drm_clflush_ipi_handler(void *null)
{
wbinvd();
}
#endif
 
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
uint8_t *page_virtual;
unsigned int i, j;
 
page_virtual = AllocKernelSpace(4096);
 
if(page_virtual != NULL)
{
dma_addr_t *src, *dst;
u32 count;
 
for (i = 0; i < num_pages; i++)
{
mb();
// asm volatile("mfence");
 
MapPage(page_virtual,*pages++, 0x001);
for (j = 0; j < PAGE_SIZE; j += x86_clflush_size)
clflush(page_virtual + j);
mb();
}
FreeKernelSpace(page_virtual);
}
 
}
EXPORT_SYMBOL(drm_clflush_pages);
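/*
 * Usage sketch (illustration only, not part of this revision): a driver is
 * expected to flush an object's backing pages after CPU writes and before
 * the GPU reads them. The pages/num_pages parameters are placeholders for
 * whatever backing-store array the caller keeps.
 */
static void example_flush_before_gpu(struct page **pages, unsigned long num_pages)
{
    /* maps each page, clflushes every x86_clflush_size bytes and fences
     * before/after, as implemented above */
    drm_clflush_pages(pages, num_pages);
}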
 
#if 0
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
struct sg_page_iter sg_iter;
 
mb();
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
mb();
 
return;
}
 
if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
 
void
drm_clflush_virt_range(char *addr, unsigned long length)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
char *end = addr + length;
mb();
for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
clflush(addr);
clflush(end - 1);
mb();
return;
}
 
if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
 
#endif
/drivers/video/drm/drm_crtc.c
121,13 → 121,6
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
 
static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
{
{ DRM_MODE_DITHERING_OFF, "Off" },
{ DRM_MODE_DITHERING_ON, "On" },
{ DRM_MODE_DITHERING_AUTO, "Automatic" },
};
 
/*
* Non-global properties, but "required" for certain connectors.
*/
182,7 → 175,7
struct drm_conn_prop_enum_list {
int type;
const char *name;
int count;
struct ida ida;
};
 
/*
189,22 → 182,22
* Connector and encoder types.
*/
static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
{ DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
{ DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
{ DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
{ DRM_MODE_CONNECTOR_VGA, "VGA" },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
{ DRM_MODE_CONNECTOR_Composite, "Composite" },
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
{ DRM_MODE_CONNECTOR_Component, "Component" },
{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
};
 
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
216,6 → 209,22
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};
 
void drm_connector_ida_init(void)
{
int i;
 
for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
ida_init(&drm_connector_enum_list[i].ida);
}
 
void drm_connector_ida_destroy(void)
{
int i;
 
for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
ida_destroy(&drm_connector_enum_list[i].ida);
}
 
const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
static char buf[32];
673,7 → 682,7
}
EXPORT_SYMBOL(drm_mode_probed_add);
 
/**
/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
* @mode: mode to remove
680,13 → 689,12
*
* Remove @mode from @connector's mode list, then free it.
*/
void drm_mode_remove(struct drm_connector *connector,
static void drm_mode_remove(struct drm_connector *connector,
struct drm_display_mode *mode)
{
list_del(&mode->head);
drm_mode_destroy(connector->dev, mode);
}
EXPORT_SYMBOL(drm_mode_remove);
 
/**
* drm_connector_init - Init a preallocated connector
707,6 → 715,8
int connector_type)
{
int ret;
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
 
drm_modeset_lock_all(dev);
 
719,7 → 729,12
connector->funcs = funcs;
connector->connector_type = connector_type;
connector->connector_type_id =
++drm_connector_enum_list[connector_type].count; /* TODO */
ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
if (connector->connector_type_id < 0) {
ret = connector->connector_type_id;
drm_mode_object_put(dev, &connector->base);
goto out;
}
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
760,6 → 775,9
list_for_each_entry_safe(mode, t, &connector->modes, head)
drm_mode_remove(connector, mode);
 
ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
connector->connector_type_id);
 
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
777,6 → 795,41
}
EXPORT_SYMBOL(drm_connector_unplug_all);
 
int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
const struct drm_bridge_funcs *funcs)
{
int ret;
 
drm_modeset_lock_all(dev);
 
ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
if (ret)
goto out;
 
bridge->dev = dev;
bridge->funcs = funcs;
 
list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
dev->mode_config.num_bridge++;
 
out:
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_bridge_init);
 
void drm_bridge_cleanup(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
 
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &bridge->base);
list_del(&bridge->head);
dev->mode_config.num_bridge--;
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_bridge_cleanup);
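/*
 * Usage sketch (illustration only): a bridge driver supplies a
 * drm_bridge_funcs table whose callbacks match the calls made from
 * drm_crtc_helper.c further down in this revision (mode_fixup, disable,
 * post_disable, mode_set, pre_enable, enable) and registers the bridge with
 * drm_bridge_init(). All example_* symbols are hypothetical stubs and the
 * callback signatures are inferred from those helper calls.
 */
static bool example_bridge_mode_fixup(struct drm_bridge *bridge,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode)
{
    return true; /* accept the mode unchanged */
}

static void example_bridge_nop(struct drm_bridge *bridge)
{
    /* nothing to do for this dummy bridge */
}

static void example_bridge_mode_set(struct drm_bridge *bridge,
                                    struct drm_display_mode *mode,
                                    struct drm_display_mode *adjusted_mode)
{
}

static const struct drm_bridge_funcs example_bridge_funcs = {
    .mode_fixup   = example_bridge_mode_fixup,
    .disable      = example_bridge_nop,
    .post_disable = example_bridge_nop,
    .mode_set     = example_bridge_mode_set,
    .pre_enable   = example_bridge_nop,
    .enable       = example_bridge_nop,
};

/* then, during output setup: drm_bridge_init(dev, &example_bridge, &example_bridge_funcs); */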
 
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
1131,30 → 1184,6
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
 
/**
* drm_mode_create_dithering_property - create dithering property
* @dev: DRM device
*
* Called by a driver the first time it's needed, must be attached to desired
* connectors.
*/
int drm_mode_create_dithering_property(struct drm_device *dev)
{
struct drm_property *dithering_mode;
 
if (dev->mode_config.dithering_mode_property)
return 0;
 
dithering_mode =
drm_property_create_enum(dev, 0, "dithering",
drm_dithering_mode_enum_list,
ARRAY_SIZE(drm_dithering_mode_enum_list));
dev->mode_config.dithering_mode_property = dithering_mode;
 
return 0;
}
EXPORT_SYMBOL(drm_mode_create_dithering_property);
 
/**
* drm_mode_create_dirty_property - create dirty property
* @dev: DRM device
*
1186,6 → 1215,7
total_objects += dev->mode_config.num_crtc;
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
total_objects += dev->mode_config.num_bridge;
 
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
1194,6 → 1224,7
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
group->num_bridges = 0;
return 0;
}
 
1203,6 → 1234,7
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_bridge *bridge;
int ret;
 
if ((ret = drm_mode_group_init(dev, group)))
1219,6 → 1251,11
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors++] = connector->base.id;
 
list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors + group->num_bridges++] =
bridge->base.id;
 
return 0;
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
2604,10 → 2641,22
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle)
ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
else
if (fb->funcs->create_handle) {
if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
ret = fb->funcs->create_handle(fb, file_priv,
&r->handle);
} else {
/* GET_FB() is an unprivileged ioctl so we must not
* return a buffer-handle to non-master processes! For
* backwards-compatibility reasons, we cannot make
* GET_FB() privileged, so just return an invalid handle
* for non-masters. */
r->handle = 0;
ret = 0;
}
} else {
ret = -ENODEV;
}
 
drm_framebuffer_unreference(fb);
 
3721,6 → 3770,7
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.bridge_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
/drivers/video/drm/drm_crtc_helper.c
257,10 → 257,16
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 
if (encoder->bridge)
encoder->bridge->funcs->disable(encoder->bridge);
 
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
 
if (encoder->bridge)
encoder->bridge->funcs->post_disable(encoder->bridge);
}
 
/**
424,6 → 430,16
 
if (encoder->crtc != crtc)
continue;
 
if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
ret = encoder->bridge->funcs->mode_fixup(
encoder->bridge, mode, adjusted_mode);
if (!ret) {
DRM_DEBUG_KMS("Bridge fixup failed\n");
goto done;
}
}
 
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
443,9 → 459,16
 
if (encoder->crtc != crtc)
continue;
 
if (encoder->bridge)
encoder->bridge->funcs->disable(encoder->bridge);
 
encoder_funcs = encoder->helper_private;
/* Disable the encoders as the first thing we do. */
encoder_funcs->prepare(encoder);
 
if (encoder->bridge)
encoder->bridge->funcs->post_disable(encoder->bridge);
}
 
drm_crtc_prepare_encoders(dev);
469,6 → 492,10
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
 
if (encoder->bridge && encoder->bridge->funcs->mode_set)
encoder->bridge->funcs->mode_set(encoder->bridge, mode,
adjusted_mode);
}
 
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
479,9 → 506,14
if (encoder->crtc != crtc)
continue;
 
if (encoder->bridge)
encoder->bridge->funcs->pre_enable(encoder->bridge);
 
encoder_funcs = encoder->helper_private;
encoder_funcs->commit(encoder);
 
if (encoder->bridge)
encoder->bridge->funcs->enable(encoder->bridge);
}
 
/* Store real post-adjustment hardware mode. */
830,6 → 862,31
return dpms;
}
 
/* Helper which handles bridge ordering around encoder dpms */
static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_bridge *bridge = encoder->bridge;
struct drm_encoder_helper_funcs *encoder_funcs;
 
if (bridge) {
if (mode == DRM_MODE_DPMS_ON)
bridge->funcs->pre_enable(bridge);
else
bridge->funcs->disable(bridge);
}
 
encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
encoder_funcs->dpms(encoder, mode);
 
if (bridge) {
if (mode == DRM_MODE_DPMS_ON)
bridge->funcs->enable(bridge);
else
bridge->funcs->post_disable(bridge);
}
}
 
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
{
int dpms = DRM_MODE_DPMS_OFF;
857,7 → 914,7
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms;
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
 
if (mode == connector->dpms)
return;
865,6 → 922,9
old_dpms = connector->dpms;
connector->dpms = mode;
 
if (encoder)
encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
 
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
873,22 → 933,14
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
if (encoder) {
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
if (encoder)
drm_helper_encoder_dpms(encoder, encoder_dpms);
}
}
 
/* from on to off, do encoder then crtc */
if (mode > old_dpms) {
if (encoder) {
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
}
if (encoder)
drm_helper_encoder_dpms(encoder, encoder_dpms);
if (crtc) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
924,9 → 976,8
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret;
int ret, encoder_dpms;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 
946,10 → 997,10
if(encoder->crtc != crtc)
continue;
 
encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
encoder_dpms = drm_helper_choose_encoder_dpms(
encoder);
 
drm_helper_encoder_dpms(encoder, encoder_dpms);
}
 
crtc_funcs = crtc->helper_private;
/drivers/video/drm/drm_drv.c
0,0 → 1,70
/**
* \file drm_drv.c
* Generic driver template
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*
* To use this template, you must at least define the following (samples
* given for the MGA driver):
*
* \code
* #define DRIVER_AUTHOR "VA Linux Systems, Inc."
*
* #define DRIVER_NAME "mga"
* #define DRIVER_DESC "Matrox G200/G400"
* #define DRIVER_DATE "20001127"
*
* #define drm_x mga_##x
* \endcode
*/
 
/*
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
 
 
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
 
int drm_core_init(void)
{
int ret = -ENOMEM;
 
drm_global_init();
drm_connector_ida_init();
// idr_init(&drm_minors_idr);
return 0;
}
 
 
 
/drivers/video/drm/drm_edid.c
125,6 → 125,9
 
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
};
 
/*
931,6 → 934,36
.vrefresh = 100, },
};
 
/*
* HDMI 1.4 4k modes.
*/
static const struct drm_display_mode edid_4k_modes[] = {
/* 1 - 3840x2160@30Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4016, 4104, 4400, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, },
/* 2 - 3840x2160@25Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4896, 4984, 5280, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, },
/* 3 - 3840x2160@24Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, },
/* 4 - 4096x2160@24Hz (SMPTE) */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
4096, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, },
};
 
/*** DDC fetch and block validation ***/
 
static const u8 edid_header[] = {
2287,7 → 2320,6
return closure.modes;
}
 
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
2298,10 → 2330,10
#define EDID_CEA_YCRCB422 (1 << 4)
#define EDID_CEA_VCDB_QS (1 << 6)
 
/**
/*
* Search EDID for CEA extension block.
*/
u8 *drm_find_cea_extension(struct edid *edid)
static u8 *drm_find_cea_extension(struct edid *edid)
{
u8 *edid_ext = NULL;
int i;
2322,7 → 2354,6
 
return edid_ext;
}
EXPORT_SYMBOL(drm_find_cea_extension);
 
/*
* Calculate the alternate clock for the CEA mode
2380,6 → 2411,54
}
EXPORT_SYMBOL(drm_match_cea_mode);
 
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
* specific block).
*
* It's almost like cea_mode_alternate_clock(), we just need to add an
* exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
* one.
*/
static unsigned int
hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
{
if (hdmi_mode->hdisplay == 4096 && hdmi_mode->vdisplay == 2160)
return hdmi_mode->clock;
 
return cea_mode_alternate_clock(hdmi_mode);
}
 
/*
* drm_match_hdmi_mode - look for an HDMI mode matching a given mode
* @to_match: display mode
*
* An HDMI mode is one defined in the HDMI vendor specific block.
*
* Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
*/
static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
{
u8 mode;
 
if (!to_match->clock)
return 0;
 
for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
unsigned int clock1, clock2;
 
/* Make sure to also match alternate clocks */
clock1 = hdmi_mode->clock;
clock2 = hdmi_mode_alternate_clock(hdmi_mode);
 
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks(to_match, hdmi_mode))
return mode + 1;
}
return 0;
}
 
static int
add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
{
2397,18 → 2476,26
* with the alternate clock for certain CEA modes.
*/
list_for_each_entry(mode, &connector->probed_modes, head) {
const struct drm_display_mode *cea_mode;
const struct drm_display_mode *cea_mode = NULL;
struct drm_display_mode *newmode;
u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
u8 mode_idx = drm_match_cea_mode(mode) - 1;
unsigned int clock1, clock2;
 
if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
cea_mode = &edid_cea_modes[mode_idx];
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
mode_idx = drm_match_hdmi_mode(mode) - 1;
if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
cea_mode = &edid_4k_modes[mode_idx];
clock2 = hdmi_mode_alternate_clock(cea_mode);
}
}
 
if (!cea_mode)
continue;
 
cea_mode = &edid_cea_modes[cea_mode_idx];
 
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
 
if (clock1 == clock2)
continue;
2442,10 → 2529,11
}
 
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
struct drm_device *dev = connector->dev;
u8 * mode, cea_mode;
const u8 *mode;
u8 cea_mode;
int modes = 0;
 
for (mode = db; mode < db + len; mode++) {
2465,7 → 2553,69
return modes;
}
 
/*
* do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
* @connector: connector corresponding to the HDMI sink
* @db: start of the CEA vendor specific block
* @len: length of the CEA block payload, ie. one can access up to db[len]
*
* Parses the HDMI VSDB looking for modes to add to @connector.
*/
static int
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
struct drm_device *dev = connector->dev;
int modes = 0, offset = 0, i;
u8 vic_len;
 
if (len < 8)
goto out;
 
/* no HDMI_Video_Present */
if (!(db[8] & (1 << 5)))
goto out;
 
/* Latency_Fields_Present */
if (db[8] & (1 << 7))
offset += 2;
 
/* I_Latency_Fields_Present */
if (db[8] & (1 << 6))
offset += 2;
 
/* the declared length is not long enough for the first 2 bytes
* of additional video format capabilities */
offset += 2;
if (len < (8 + offset))
goto out;
 
vic_len = db[8 + offset] >> 5;
 
for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
struct drm_display_mode *newmode;
u8 vic;
 
vic = db[9 + offset + i];
 
vic--; /* VICs start at 1 */
if (vic >= ARRAY_SIZE(edid_4k_modes)) {
DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
continue;
}
 
newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
if (!newmode)
continue;
 
drm_mode_probed_add(connector, newmode);
modes++;
}
 
out:
return modes;
}
 
static int
cea_db_payload_len(const u8 *db)
{
return db[0] & 0x1f;
2496,6 → 2646,21
return 0;
}
 
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
int hdmi_id;
 
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
 
if (cea_db_payload_len(db) < 5)
return false;
 
hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
 
return hdmi_id == HDMI_IEEE_OUI;
}
 
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
 
2502,8 → 2667,9
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
u8 * cea = drm_find_cea_extension(edid);
u8 * db, dbl;
const u8 *cea = drm_find_cea_extension(edid);
const u8 *db;
u8 dbl;
int modes = 0;
 
if (cea && cea_revision(cea) >= 3) {
2518,6 → 2684,8
 
if (cea_db_tag(db) == VIDEO_BLOCK)
modes += do_cea_modes (connector, db+1, dbl);
else if (cea_db_is_hdmi_vsdb(db))
modes += do_hdmi_vsdb_modes(connector, db, dbl);
}
}
 
2570,21 → 2738,6
*(u8 **)data = t->data.other_data.data.str.str;
}
 
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
int hdmi_id;
 
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
 
if (cea_db_payload_len(db) < 5)
return false;
 
hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
 
return hdmi_id == HDMI_IDENTIFIER;
}
 
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
2732,6 → 2885,60
EXPORT_SYMBOL(drm_edid_to_sad);
 
/**
* drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
* @edid: EDID to parse
* @sadb: pointer to the speaker block
*
* Looks for the CEA EDID block and extracts the Speaker Allocation Data Block from it.
* Note: the returned pointer needs to be kfree()d by the caller.
*
* Returns the number of found Speaker Allocation Blocks or a negative number on error.
*/
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
{
int count = 0;
int i, start, end, dbl;
const u8 *cea;
 
cea = drm_find_cea_extension(edid);
if (!cea) {
DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
return -ENOENT;
}
 
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
return -ENOTSUPP;
}
 
if (cea_db_offsets(cea, &start, &end)) {
DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
return -EPROTO;
}
 
for_each_cea_db(cea, i, start, end) {
const u8 *db = &cea[i];
 
if (cea_db_tag(db) == SPEAKER_BLOCK) {
dbl = cea_db_payload_len(db);
 
/* Speaker Allocation Data Block */
if (dbl == 3) {
*sadb = kmalloc(dbl, GFP_KERNEL);
if (!*sadb)
return -ENOMEM;
memcpy(*sadb, &db[1], dbl);
count = dbl;
break;
}
}
}
 
return count;
}
EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
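/*
 * Usage sketch (illustration only): as the comment above notes, the block
 * returned through @sadb must be kfree()d by the caller. The edid pointer
 * is assumed to come from the connector's EDID probe.
 */
static void example_log_speaker_allocation(struct edid *edid)
{
    u8 *sadb = NULL;
    int count = drm_edid_to_speaker_allocation(edid, &sadb);

    if (count > 0) {
        /* sadb[0] is the speaker allocation bitmask copied from the SADB */
        DRM_DEBUG_KMS("speaker allocation: 0x%02x\n", sadb[0]);
        kfree(sadb);
    }
}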
 
/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
3102,9 → 3309,10
if (err < 0)
return err;
 
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
 
frame->video_code = drm_match_cea_mode(mode);
if (!frame->video_code)
return 0;
 
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
3112,3 → 3320,39
return 0;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
 
/**
* drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
* data from a DRM display mode
* @frame: HDMI vendor infoframe
* @mode: DRM display mode
*
* Note that there is a need to send HDMI vendor infoframes only when using a
* 4k or stereoscopic 3D mode. So when giving any other mode as input this
* function will return -EINVAL, an error that can be safely ignored.
*
* Returns 0 on success or a negative error code on failure.
*/
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode)
{
int err;
u8 vic;
 
if (!frame || !mode)
return -EINVAL;
 
vic = drm_match_hdmi_mode(mode);
if (!vic)
return -EINVAL;
 
err = hdmi_vendor_infoframe_init(frame);
if (err < 0)
return err;
 
frame->vic = vic;
 
return 0;
}
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
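/*
 * Usage sketch (illustration only): per the comment above, -EINVAL simply
 * means the mode needs no HDMI vendor infoframe and can be ignored; a
 * caller packs and sends @frame only on success.
 */
static void example_prepare_vendor_infoframe(const struct drm_display_mode *mode)
{
    struct hdmi_vendor_infoframe frame;
    int err;

    err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
    if (err < 0)
        return; /* usually -EINVAL: not a 4k mode, nothing to send */

    /* frame.vic now carries the HDMI VIC; pack it and write it to the sink */
}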
/drivers/video/drm/drm_fb_helper.c
114,6 → 114,9
uint16_t *r_base, *g_base, *b_base;
int i;
 
if (helper->funcs->gamma_get == NULL)
return;
 
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
337,6 → 340,14
return 0;
}
 
/*
* The driver really shouldn't advertise pseudo/directcolor
* visuals if it can't deal with the palette.
*/
if (WARN_ON(!fb_helper->funcs->gamma_set ||
!fb_helper->funcs->gamma_get))
return -EINVAL;
 
pindex = regno;
 
if (fb->bits_per_pixel == 16) {
380,6 → 391,7
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
386,6 → 398,12
int i, j, rc = 0;
int start;
 
drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
}
 
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
408,10 → 426,13
 
rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
if (rc)
return rc;
goto out;
}
if (crtc_funcs->load_lut)
crtc_funcs->load_lut(crtc);
}
out:
drm_modeset_unlock_all(dev);
return rc;
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
/drivers/video/drm/drm_gem.c
32,6 → 32,7
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
 
/** @file drm_gem.c
*
78,7 → 79,6
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
 
#if 0
/**
* Initialize the GEM device fields
*/
88,7 → 88,7
{
struct drm_gem_mm *mm;
 
spin_lock_init(&dev->object_name_lock);
mutex_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
 
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
98,13 → 98,8
}
 
dev->mm_private = mm;
 
if (drm_ht_create(&mm->offset_hash, 12)) {
kfree(mm);
return -ENOMEM;
}
 
drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
drm_vma_offset_manager_init(&mm->vma_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
 
return 0;
115,12 → 110,10
{
struct drm_gem_mm *mm = dev->mm_private;
 
drm_mm_takedown(&mm->offset_manager);
drm_ht_remove(&mm->offset_hash);
drm_vma_offset_manager_destroy(&mm->vma_manager);
kfree(mm);
dev->mm_private = NULL;
}
#endif
 
/**
* Initialize an already allocated GEM object of the specified size with
129,16 → 122,14
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
struct file *filp;
 
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(obj->filp))
return PTR_ERR(obj->filp);
filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(filp))
return PTR_ERR(filp);
 
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
obj->size = size;
drm_gem_private_object_init(dev, obj, size);
obj->filp = filp;
 
return 0;
}
149,7 → 140,7
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
*/
int drm_gem_private_object_init(struct drm_device *dev,
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
158,10 → 149,9
obj->filp = NULL;
 
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
obj->handle_count = 0;
obj->size = size;
 
return 0;
drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
 
194,8 → 184,59
}
EXPORT_SYMBOL(drm_gem_object_alloc);
 
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
BUG();
}
 
/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
 
/* Remove any name for this object */
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
obj->name = 0;
/*
* The object name held a reference to this object, drop
* that now.
*
* This cannot be the last reference, since the handle holds one too.
*/
kref_put(&obj->refcount, drm_gem_object_ref_bug);
}
}
 
 
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
if (WARN_ON(obj->handle_count == 0))
return;
 
/*
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
 
mutex_lock(&obj->dev->object_name_lock);
if (--obj->handle_count == 0) {
drm_gem_object_handle_free(obj);
}
mutex_unlock(&obj->dev->object_name_lock);
 
drm_gem_object_unreference_unlocked(obj);
}
 
/**
* Removes the mapping from handle to filp for this object.
*/
int
248,7 → 289,7
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
255,6 → 296,8
struct drm_device *dev = obj->dev;
int ret;
 
WARN_ON(!mutex_is_locked(&dev->object_name_lock));
 
/*
* Get the user-visible handle using idr. Preload and perform
* allocation under our spinlock.
263,14 → 306,22
spin_lock(&file_priv->table_lock);
 
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
 
drm_gem_object_reference(obj);
obj->handle_count++;
spin_unlock(&file_priv->table_lock);
idr_preload_end();
if (ret < 0)
mutex_unlock(&dev->object_name_lock);
if (ret < 0) {
drm_gem_object_handle_unreference_unlocked(obj);
return ret;
}
*handlep = ret;
 
drm_gem_object_handle_reference(obj);
// ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
// if (ret) {
// drm_gem_handle_delete(file_priv, *handlep);
// return ret;
// }
 
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
282,6 → 333,21
 
return 0;
}
 
/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
mutex_lock(&obj->dev->object_name_lock);
 
return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
 
 
297,18 → 363,15
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list = &obj->map_list;
 
drm_ht_remove_item(&mm->offset_hash, &list->hash);
drm_mm_put_block(list->file_offset_node);
kfree(list->map);
list->map = NULL;
drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
/**
* drm_gem_create_mmap_offset - create a fake mmap offset for an object
* drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
* @obj: obj in question
* @size: the virtual size
*
* GEM memory mapping works by handing back to userspace a fake mmap offset
* it can use in a subsequent mmap(2) call. The DRM core code then looks
315,16 → 378,15
* up the object based on the offset and sets up the various memory mapping
* structures.
*
* This routine allocates and attaches a fake offset for @obj.
* This routine allocates and attaches a fake offset for @obj, in cases where
* the virtual size differs from the physical size (ie. obj->size). Otherwise
* just use drm_gem_create_mmap_offset().
*/
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
struct drm_local_map *map;
int ret;
 
/* Set the object up for mmap'ing */
list = &obj->map_list;
439,8 → 501,14
if (obj == NULL)
return -ENOENT;
 
mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
spin_lock(&dev->object_name_lock);
/* prevent races with concurrent gem_close. */
if (obj->handle_count == 0) {
ret = -ENOENT;
goto err;
}
 
if (!obj->name) {
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
if (ret < 0)
456,8 → 524,8
ret = 0;
 
err:
spin_unlock(&dev->object_name_lock);
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
483,15 → 551,17
if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);
 
spin_lock(&dev->object_name_lock);
mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj)
if (obj) {
drm_gem_object_reference(obj);
spin_unlock(&dev->object_name_lock);
if (!obj)
} else {
mutex_unlock(&dev->object_name_lock);
return -ENOENT;
}
 
ret = drm_gem_handle_create(file_priv, obj, &handle);
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
525,6 → 595,7
struct drm_device *dev = obj->dev;
 
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
575,41 → 646,7
}
EXPORT_SYMBOL(drm_gem_object_free);
 
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
BUG();
}
 
/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
 
/* Remove any name for this object */
spin_lock(&dev->object_name_lock);
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
obj->name = 0;
spin_unlock(&dev->object_name_lock);
/*
* The object name held a reference to this object, drop
* that now.
*
* This cannot be the last reference, since the handle holds one too.
*/
kref_put(&obj->refcount, drm_gem_object_ref_bug);
} else
spin_unlock(&dev->object_name_lock);
 
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
 
#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
/drivers/video/drm/drm_hashtab.c
94,7 → 94,6
return NULL;
}
 
 
static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
unsigned long key)
{
/drivers/video/drm/drm_irq.c
62,7 → 62,7
irqreturn_t device_irq_handler(struct drm_device *dev)
{
 
printf("video irq\n");
// printf("video irq\n");
 
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
 
116,6 → 116,7
ret = dev->driver->irq_postinstall(dev);
 
if (ret < 0) {
dev->irq_enabled = 0;
DRM_ERROR(__FUNCTION__);
}
 
128,11 → 129,6
EXPORT_SYMBOL(drm_irq_install);
 
 
static inline u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
}
 
 
u64 div64_u64(u64 dividend, u64 divisor)
260,7 → 256,3
#endif
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
 
 
 
 
/drivers/video/drm/drm_mm.c
49,59 → 49,19
 
#define MM_UNUSED_TARGET 4
 
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
struct drm_mm_node *child;
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
enum drm_mm_search_flags flags);
 
if (atomic)
child = kzalloc(sizeof(*child), GFP_ATOMIC);
else
child = kzalloc(sizeof(*child), GFP_KERNEL);
 
if (unlikely(child == NULL)) {
spin_lock(&mm->unused_lock);
if (list_empty(&mm->unused_nodes))
child = NULL;
else {
child =
list_entry(mm->unused_nodes.next,
struct drm_mm_node, node_list);
list_del(&child->node_list);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
}
return child;
}
 
/* drm_mm_pre_get() - pre allocate drm_mm_node structure
* drm_mm: memory manager struct we are pre-allocating for
*
* Returns 0 on success or -ENOMEM if allocation fails.
*/
int drm_mm_pre_get(struct drm_mm *mm)
{
struct drm_mm_node *node;
 
spin_lock(&mm->unused_lock);
while (mm->num_unused < MM_UNUSED_TARGET) {
spin_unlock(&mm->unused_lock);
node = kzalloc(sizeof(*node), GFP_KERNEL);
spin_lock(&mm->unused_lock);
 
if (unlikely(node == NULL)) {
int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
spin_unlock(&mm->unused_lock);
return ret;
}
++mm->num_unused;
list_add_tail(&node->node_list, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
 
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
147,26 → 107,20
}
}
 
struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
unsigned long start,
unsigned long size,
bool atomic)
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole, *node;
unsigned long end = start + size;
struct drm_mm_node *hole;
unsigned long end = node->start + node->size;
unsigned long hole_start;
unsigned long hole_end;
 
BUG_ON(node == NULL);
 
/* Find the relevant hole to add our node to */
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (hole_start > start || hole_end < end)
if (hole_start > node->start || hole_end < end)
continue;
 
node = drm_mm_kmalloc(mm, atomic);
if (unlikely(node == NULL))
return NULL;
 
node->start = start;
node->size = size;
node->mm = mm;
node->allocated = 1;
 
173,7 → 127,7
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole->node_list);
 
if (start == hole_start) {
if (node->start == hole_start) {
hole->hole_follows = 0;
list_del_init(&hole->hole_stack);
}
184,32 → 138,15
node->hole_follows = 1;
}
 
return node;
return 0;
}
 
WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
return NULL;
WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
node->start, node->size);
return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_create_block);
EXPORT_SYMBOL(drm_mm_reserve_node);
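/*
 * Usage sketch (illustration only): with the drm_mm_create_block() helpers
 * gone, a caller that needs a fixed range (for example a firmware-reserved
 * region) fills in start/size on its own node and asks the manager to carve
 * that range out of the hole containing it.
 */
static int example_reserve_fixed_range(struct drm_mm *mm, struct drm_mm_node *node,
                                       unsigned long start, unsigned long size)
{
    memset(node, 0, sizeof(*node));
    node->start = start;
    node->size = size;

    /* returns -ENOSPC if no hole covers [start, start + size) */
    return drm_mm_reserve_node(mm, node);
}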
 
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
int atomic)
{
struct drm_mm_node *node;
 
node = drm_mm_kmalloc(hole_node->mm, atomic);
if (unlikely(node == NULL))
return NULL;
 
drm_mm_insert_helper(hole_node, node, size, alignment, color);
 
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
 
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. The preallocated memory node
217,12 → 154,13
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color)
unsigned long color,
enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
 
hole_node = drm_mm_search_free_generic(mm, size, alignment,
color, 0);
color, flags);
if (!hole_node)
return -ENOSPC;
 
231,13 → 169,6
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
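/*
 * Usage sketch (illustration only): the embedded-node API expects the caller
 * to own the drm_mm_node. Flags value 0 requests the plain first-fit search
 * (the !DRM_MM_SEARCH_BEST path below); release the range again with
 * drm_mm_remove_node() once it is no longer needed.
 */
static int example_alloc_range(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size)
{
    int ret;

    memset(node, 0, sizeof(*node));
    ret = drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
    if (ret)
        return ret; /* -ENOSPC: no hole large enough */

    /* node->start now holds the allocated offset */
    return 0;
}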
 
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment)
{
return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);
 
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
290,27 → 221,6
}
}
 
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int atomic)
{
struct drm_mm_node *node;
 
node = drm_mm_kmalloc(hole_node->mm, atomic);
if (unlikely(node == NULL))
return NULL;
 
drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
start, end);
 
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
 
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. This is for range
318,13 → 228,14
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment, unsigned long color,
unsigned long start, unsigned long end)
unsigned long start, unsigned long end,
enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
 
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
start, end, 0);
start, end, flags);
if (!hole_node)
return -ENOSPC;
 
335,14 → 246,6
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long start, unsigned long end)
{
return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
/**
* Remove a memory node from the allocator.
*/
351,6 → 254,9
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
 
if (WARN_ON(!node->allocated))
return;
 
BUG_ON(node->scanned_block || node->scanned_prev_free
|| node->scanned_next_free);
 
377,28 → 283,6
}
EXPORT_SYMBOL(drm_mm_remove_node);
 
/*
* Remove a memory node from the allocator and free the allocated struct
* drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
* drm_mm_get_block functions.
*/
void drm_mm_put_block(struct drm_mm_node *node)
{
 
struct drm_mm *mm = node->mm;
 
drm_mm_remove_node(node);
 
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&node->node_list, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(node);
spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
 
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
414,11 → 298,11
return end >= start + size;
}
 
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
bool best_match)
enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
441,7 → 325,7
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
 
if (!best_match)
if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
 
if (entry->size < best_size) {
452,15 → 336,14
 
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
 
struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
bool best_match)
enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
488,7 → 371,7
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
 
if (!best_match)
if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
 
if (entry->size < best_size) {
499,7 → 382,6
 
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
 
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
634,8 → 516,8
* corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
* immediately following drm_mm_search_free with best_match = 0 will then return
* the just freed block (because its at the top of the free_stack list).
* immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
* return the just freed block (because it's at the top of the free_stack list).
*
* Returns one if this block should be evicted, zero otherwise. Will always
* return zero when no hole has been found.
672,10 → 554,7
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->hole_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
 
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
695,23 → 574,9
 
void drm_mm_takedown(struct drm_mm * mm)
{
struct drm_mm_node *entry, *next;
 
if (WARN(!list_empty(&mm->head_node.node_list),
"Memory manager not clean. Delaying takedown\n")) {
return;
WARN(!list_empty(&mm->head_node.node_list),
"Memory manager not clean during takedown.\n");
}
 
spin_lock(&mm->unused_lock);
list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
list_del(&entry->node_list);
kfree(entry);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
 
BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
 
static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
/drivers/video/drm/drm_modes.c
594,27 → 594,6
EXPORT_SYMBOL(drm_mode_set_name);
 
/**
* drm_mode_list_concat - move modes from one list to another
* @head: source list
* @new: dst list
*
* LOCKING:
* Caller must ensure both lists are locked.
*
* Move all the modes from @head to @new.
*/
void drm_mode_list_concat(struct list_head *head, struct list_head *new)
{
 
struct list_head *entry, *tmp;
 
list_for_each_safe(entry, tmp, head) {
list_move_tail(entry, new);
}
}
EXPORT_SYMBOL(drm_mode_list_concat);
 
/**
* drm_mode_width - get the width of a mode
* @mode: mode
*
921,43 → 900,6
EXPORT_SYMBOL(drm_mode_validate_size);
 
/**
* drm_mode_validate_clocks - validate modes against clock limits
* @dev: DRM device
* @mode_list: list of modes to check
* @min: minimum clock rate array
* @max: maximum clock rate array
* @n_ranges: number of clock ranges (size of arrays)
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Some code may need to check a mode list against the clock limits of the
* device in question. This function walks the mode list, testing to make
* sure each mode falls within a given range (defined by @min and @max
* arrays) and sets @mode->status as needed.
*/
void drm_mode_validate_clocks(struct drm_device *dev,
struct list_head *mode_list,
int *min, int *max, int n_ranges)
{
struct drm_display_mode *mode;
int i;
 
list_for_each_entry(mode, mode_list, head) {
bool good = false;
for (i = 0; i < n_ranges; i++) {
if (mode->clock >= min[i] && mode->clock <= max[i]) {
good = true;
break;
}
}
if (!good)
mode->status = MODE_CLOCK_RANGE;
}
}
EXPORT_SYMBOL(drm_mode_validate_clocks);
 
/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
* @mode_list: list of modes to check
/drivers/video/drm/drm_pci.c
84,7 → 84,331
return dmah;
}
 
EXPORT_SYMBOL(drm_pci_alloc);
 
#if 0
/**
* \brief Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
unsigned long addr;
size_t sz;
 
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
}
}
 
/**
* \brief Free a PCI consistent memory block
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
__drm_pci_free(dev, dmah);
kfree(dmah);
}
 
EXPORT_SYMBOL(drm_pci_free);
 
 
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
/* For historical reasons, drm_get_pci_domain() is busticated
* on most archs and has to remain so for userspace interface
* < 1.4, except on alpha which was right from the beginning
*/
if (dev->if_version < 0x10004)
return 0;
#endif /* __alpha__ */
 
return pci_domain_nr(dev->pdev->bus);
}
 
static int drm_pci_get_irq(struct drm_device *dev)
{
return dev->pdev->irq;
}
 
static const char *drm_pci_get_name(struct drm_device *dev)
{
struct pci_driver *pdriver = dev->driver->kdriver.pci;
return pdriver->name;
}
 
static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
struct pci_driver *pdriver = dev->driver->kdriver.pci;
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
 
 
len = snprintf(master->unique, master->unique_len,
"pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
 
if (len >= master->unique_len) {
DRM_ERROR("buffer overflow");
ret = -EINVAL;
goto err;
} else
master->unique_len = len;
 
dev->devname =
kmalloc(strlen(pdriver->name) +
master->unique_len + 2, GFP_KERNEL);
 
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
 
sprintf(dev->devname, "%s@%s", pdriver->name,
master->unique);
 
return 0;
err:
return ret;
}
 
static int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u)
{
int domain, bus, slot, func, ret;
const char *bus_name;
 
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
goto err;
}
 
if (copy_from_user(master->unique, u->unique, master->unique_len)) {
ret = -EFAULT;
goto err;
}
 
master->unique[master->unique_len] = '\0';
 
bus_name = dev->driver->bus->get_name(dev);
dev->devname = kmalloc(strlen(bus_name) +
strlen(master->unique) + 2, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
goto err;
}
 
sprintf(dev->devname, "%s@%s", bus_name,
master->unique);
 
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
ret = -EINVAL;
goto err;
}
 
domain = bus >> 8;
bus &= 0xff;
 
if ((domain != drm_get_pci_domain(dev)) ||
(bus != dev->pdev->bus->number) ||
(slot != PCI_SLOT(dev->pdev->devfn)) ||
(func != PCI_FUNC(dev->pdev->devfn))) {
ret = -EINVAL;
goto err;
}
return 0;
err:
return ret;
}
 
 
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
 
p->irq = dev->pdev->irq;
 
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
return 0;
}
 
static int drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_has_AGP(dev)) {
if (drm_pci_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
&& (dev->agp == NULL)) {
DRM_ERROR("Cannot initialize the agpgart module.\n");
return -EINVAL;
}
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024);
}
}
return 0;
}
 
static void drm_pci_agp_destroy(struct drm_device *dev)
{
if (drm_core_has_AGP(dev) && dev->agp) {
arch_phys_wc_del(dev->agp->agp_mtrr);
drm_agp_clear(dev);
drm_agp_destroy(dev->agp);
dev->agp = NULL;
}
}
 
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
.agp_destroy = drm_pci_agp_destroy,
};
#endif
 
/**
* Register.
*
* \param pdev - PCI device structure
* \param ent entry from the PCI ID table with device type flags
* \return zero on success or a negative number on failure.
*
* Attempts to get inter-module "drm" information. If we are first,
* register the character device and inter-module information.
* Try to register; if we fail to register, back out the previous work.
*/
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver)
{
static struct drm_device drm_dev;
static struct drm_file drm_file;
 
struct drm_device *dev;
struct drm_file *priv;
 
int ret;
 
dev = &drm_dev;
priv = &drm_file;
 
drm_file_handlers[0] = priv;
 
// ret = pci_enable_device(pdev);
// if (ret)
// goto err_g1;
 
pci_set_master(pdev);
 
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
 
DRM_DEBUG("\n");
 
 
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
 
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
 
// mutex_lock(&drm_global_mutex);
 
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
 
#if 0
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
pci_set_drvdata(pdev, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g2;
}
 
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
if (ret)
goto err_g21;
}
 
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
goto err_g3;
#endif
 
if (dev->driver->load) {
ret = dev->driver->load(dev, ent->driver_data);
if (ret)
goto err_g4;
}
 
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
goto err_g4;
}
 
 
// mutex_unlock(&drm_global_mutex);
return 0;
 
err_g4:
// drm_put_minor(&dev->primary);
err_g3:
// if (dev->render)
// drm_put_minor(&dev->render);
err_g21:
// if (drm_core_check_feature(dev, DRIVER_MODESET))
// drm_put_minor(&dev->control);
err_g2:
// pci_disable_device(pdev);
err_g1:
// kfree(dev);
// mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
 
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
/drivers/video/drm/drm_rect.c
0,0 → 1,295
/*
* Copyright (C) 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
 
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <drm/drmP.h>
#include <drm/drm_rect.h>
 
/**
* drm_rect_intersect - intersect two rectangles
* @r1: first rectangle
* @r2: second rectangle
*
* Calculate the intersection of rectangles @r1 and @r2.
* @r1 will be overwritten with the intersection.
*
* RETURNS:
* %true if rectangle @r1 is still visible after the operation,
* %false otherwise.
*/
bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
{
r1->x1 = max(r1->x1, r2->x1);
r1->y1 = max(r1->y1, r2->y1);
r1->x2 = min(r1->x2, r2->x2);
r1->y2 = min(r1->y2, r2->y2);
 
return drm_rect_visible(r1);
}
EXPORT_SYMBOL(drm_rect_intersect);
 
/**
* drm_rect_clip_scaled - perform a scaled clip operation
* @src: source window rectangle
* @dst: destination window rectangle
* @clip: clip rectangle
* @hscale: horizontal scaling factor
* @vscale: vertical scaling factor
*
* Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
* same amounts multiplied by @hscale and @vscale.
*
* RETURNS:
* %true if rectangle @dst is still visible after being clipped,
* %false otherwise
*/
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip,
int hscale, int vscale)
{
int diff;
 
diff = clip->x1 - dst->x1;
if (diff > 0) {
int64_t tmp = src->x1 + (int64_t) diff * hscale;
src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
diff = clip->y1 - dst->y1;
if (diff > 0) {
int64_t tmp = src->y1 + (int64_t) diff * vscale;
src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
diff = dst->x2 - clip->x2;
if (diff > 0) {
int64_t tmp = src->x2 - (int64_t) diff * hscale;
src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
diff = dst->y2 - clip->y2;
if (diff > 0) {
int64_t tmp = src->y2 - (int64_t) diff * vscale;
src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
}
 
return drm_rect_intersect(dst, clip);
}
EXPORT_SYMBOL(drm_rect_clip_scaled);
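 
#if 0
/*
 * Illustrative sketch (not part of the original file): clipping a plane to
 * the screen with 16.16 fixed-point source coordinates, which is how the
 * sprite code typically uses this helper. The rectangles and scale limits
 * below are made-up example values.
 */
static bool example_clip_plane(void)
{
	struct drm_rect src = { 0, 0, 256 << 16, 256 << 16 }; /* 256x256 source, 16.16 */
	struct drm_rect dst = { -32, -32, 96, 96 };           /* 128x128 destination */
	const struct drm_rect clip = { 0, 0, 1024, 768 };     /* visible screen */
	int hscale = drm_rect_calc_hscale(&src, &dst, 1, INT_MAX);
	int vscale = drm_rect_calc_vscale(&src, &dst, 1, INT_MAX);

	/* dst is clipped to the screen; src shrinks by the scaled amount. */
	return drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
}
#endif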
 
static int drm_calc_scale(int src, int dst)
{
int scale = 0;
 
if (src < 0 || dst < 0)
return -EINVAL;
 
if (dst == 0)
return 0;
 
scale = src / dst;
 
return scale;
}
 
/**
* drm_rect_calc_hscale - calculate the horizontal scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_hscale: minimum allowed horizontal scaling factor
* @max_hscale: maximum allowed horizontal scaling factor
*
* Calculate the horizontal scaling factor as
* (@src width) / (@dst width).
*
* RETURNS:
* The horizontal scaling factor, or a negative errno if out of limits.
*/
int drm_rect_calc_hscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_hscale, int max_hscale)
{
int src_w = drm_rect_width(src);
int dst_w = drm_rect_width(dst);
int hscale = drm_calc_scale(src_w, dst_w);
 
if (hscale < 0 || dst_w == 0)
return hscale;
 
if (hscale < min_hscale || hscale > max_hscale)
return -ERANGE;
 
return hscale;
}
EXPORT_SYMBOL(drm_rect_calc_hscale);
 
/**
* drm_rect_calc_vscale - calculate the vertical scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_vscale: minimum allowed vertical scaling factor
* @max_vscale: maximum allowed vertical scaling factor
*
* Calculate the vertical scaling factor as
* (@src height) / (@dst height).
*
* RETURNS:
* The vertical scaling factor, or a negative errno if out of limits.
*/
int drm_rect_calc_vscale(const struct drm_rect *src,
const struct drm_rect *dst,
int min_vscale, int max_vscale)
{
int src_h = drm_rect_height(src);
int dst_h = drm_rect_height(dst);
int vscale = drm_calc_scale(src_h, dst_h);
 
if (vscale < 0 || dst_h == 0)
return vscale;
 
if (vscale < min_vscale || vscale > max_vscale)
return -ERANGE;
 
return vscale;
}
EXPORT_SYMBOL(drm_rect_calc_vscale);
 
/**
* drm_rect_calc_hscale_relaxed - calculate the horizontal scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_hscale: minimum allowed horizontal scaling factor
* @max_hscale: maximum allowed horizontal scaling factor
*
* Calculate the horizontal scaling factor as
* (@src width) / (@dst width).
*
* If the calculated scaling factor is below @min_hscale,
* decrease the width of rectangle @dst to compensate.
*
* If the calculated scaling factor is above @max_hscale,
* decrease the width of rectangle @src to compensate.
*
* RETURNS:
* The horizontal scaling factor.
*/
int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_hscale, int max_hscale)
{
int src_w = drm_rect_width(src);
int dst_w = drm_rect_width(dst);
int hscale = drm_calc_scale(src_w, dst_w);
 
if (hscale < 0 || dst_w == 0)
return hscale;
 
if (hscale < min_hscale) {
int max_dst_w = src_w / min_hscale;
 
drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
 
return min_hscale;
}
 
if (hscale > max_hscale) {
int max_src_w = dst_w * max_hscale;
 
drm_rect_adjust_size(src, max_src_w - src_w, 0);
 
return max_hscale;
}
 
return hscale;
}
EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
 
/**
* drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
* @src: source window rectangle
* @dst: destination window rectangle
* @min_vscale: minimum allowed vertical scaling factor
* @max_vscale: maximum allowed vertical scaling factor
*
* Calculate the vertical scaling factor as
* (@src height) / (@dst height).
*
* If the calculated scaling factor is below @min_vscale,
* decrease the height of rectangle @dst to compensate.
*
* If the calculated scaling factor is above @max_vscale,
* decrease the height of rectangle @src to compensate.
*
* RETURNS:
* The vertical scaling factor.
*/
int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_vscale, int max_vscale)
{
int src_h = drm_rect_height(src);
int dst_h = drm_rect_height(dst);
int vscale = drm_calc_scale(src_h, dst_h);
 
if (vscale < 0 || dst_h == 0)
return vscale;
 
if (vscale < min_vscale) {
int max_dst_h = src_h / min_vscale;
 
drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
 
return min_vscale;
}
 
if (vscale > max_vscale) {
int max_src_h = dst_h * max_vscale;
 
drm_rect_adjust_size(src, 0, max_src_h - src_h);
 
return max_vscale;
}
 
return vscale;
}
EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
 
/**
* drm_rect_debug_print - print the rectangle information
* @r: rectangle to print
* @fixed_point: rectangle is in 16.16 fixed point format
*/
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
{
int w = drm_rect_width(r);
int h = drm_rect_height(r);
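/*
 * For 16.16 fixed point the fractional part is printed in microunits:
 * (x & 0xffff) * 1000000 / 65536 == ((x & 0xffff) * 15625) >> 10.
 */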
 
if (fixed_point)
DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
w >> 16, ((w & 0xffff) * 15625) >> 10,
h >> 16, ((h & 0xffff) * 15625) >> 10,
r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
else
DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
}
EXPORT_SYMBOL(drm_rect_debug_print);
/drivers/video/drm/drm_stub.c
35,6 → 35,7
 
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
 
struct va_format {
const char *fmt;
44,6 → 45,9
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
 
unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);
 
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
 
50,6 → 54,7
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
 
struct idr drm_minors_idr;
int drm_err(const char *func, const char *format, ...)
{
struct va_format vaf;
86,6 → 91,45
}
EXPORT_SYMBOL(drm_ut_debug_printk);
 
int drm_fill_in_dev(struct drm_device *dev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
int retcode;
 
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
 
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
 
// if (drm_ht_create(&dev->map_hash, 12)) {
// return -ENOMEM;
// }
 
dev->driver = driver;
 
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
if (retcode) {
DRM_ERROR("Cannot initialize graphics execution "
"manager (GEM)\n");
goto error_out_unreg;
}
}
 
return 0;
 
error_out_unreg:
// drm_lastclose(dev);
return retcode;
}
EXPORT_SYMBOL(drm_fill_in_dev);
/**
* Compute size order. Returns the exponent of the smallest power of two which
* is greater than or equal to the given number.
/drivers/video/drm/drm_vma_manager.c
0,0 → 1,436
/*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* Copyright (c) 2012 David Airlie <airlied@linux.ie>
* Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
//#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
 
/**
* DOC: vma offset manager
*
* The vma-manager is responsible for mapping arbitrary driver-dependent memory
* regions into the linear user address-space. It provides offsets to the
* caller which can then be used on the address_space of the drm-device. It
* takes care to not overlap regions, size them appropriately and to not
* confuse mm-core by inconsistent fake vm_pgoff fields.
* Drivers shouldn't use this for object placement in VMEM. This manager should
* only be used to manage mappings into linear user-space VMs.
*
* We use drm_mm as backend to manage object allocations. But it is highly
* optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
* speed up offset lookups.
*
* You must not use multiple offset managers on a single address_space.
* Otherwise, mm-core will be unable to tear down memory mappings as the VM will
* no longer be linear. Please use VM_NONLINEAR in that case and implement your
* own offset managers.
*
* This offset manager works on page-based addresses. That is, every argument
* and return code (with the exception of drm_vma_node_offset_addr()) is given
* in number of pages, not number of bytes. That means, object sizes and offsets
* must always be page-aligned (as usual).
* If you want to get a valid byte-based user-space address for a given offset,
* please see drm_vma_node_offset_addr().
*
* In addition to offset management, the vma offset manager also handles access
* management. For every open-file context that is allowed to access a given
* node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
* open-file with the offset of the node will fail with -EACCES. To revoke
* access again, use drm_vma_node_revoke(). However, the caller is responsible
* for destroying already existing mappings, if required.
*/
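 
#if 0
/*
 * Illustrative sketch (not part of the original file): typical flow for
 * exposing an object through the offset manager. The node is assumed to be
 * embedded in a driver-specific buffer object; @filp is the open-file that
 * should be allowed to mmap() it.
 */
static int example_expose_object(struct drm_vma_offset_manager *mgr,
				 struct drm_vma_offset_node *node,
				 unsigned long num_pages,
				 struct file *filp)
{
	int ret;

	/* Reserve a page-based range in the linear mmap address space. */
	ret = drm_vma_offset_add(mgr, node, num_pages);
	if (ret)
		return ret;

	/* Grant this open-file access; without it, mmap() on the returned
	 * offset fails with -EACCES. */
	ret = drm_vma_node_allow(node, filp);
	if (ret) {
		drm_vma_offset_remove(mgr, node);
		return ret;
	}

	/* User-space maps the byte offset drm_vma_node_offset_addr(node). */
	return 0;
}
#endif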
 
/**
* drm_vma_offset_manager_init - Initialize new offset-manager
* @mgr: Manager object
* @page_offset: Offset of available memory area (page-based)
* @size: Size of available address space range (page-based)
*
* Initialize a new offset-manager. The offset and area size available for the
* manager are given as @page_offset and @size. Both are interpreted as
* page-numbers, not bytes.
*
* Adding/removing nodes from the manager is locked internally and protected
* against concurrent access. However, node allocation and destruction is left
* for the caller. While calling into the vma-manager, a given node must
* always be guaranteed to be referenced.
*/
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
unsigned long page_offset, unsigned long size)
{
rwlock_init(&mgr->vm_lock);
mgr->vm_addr_space_rb = RB_ROOT;
drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
 
/**
* drm_vma_offset_manager_destroy() - Destroy offset manager
* @mgr: Manager object
*
* Destroy an object manager which was previously created via
* drm_vma_offset_manager_init(). The caller must remove all allocated nodes
* before destroying the manager. Otherwise, drm_mm will refuse to free the
* requested resources.
*
* The manager must not be accessed after this function is called.
*/
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
/* take the lock to protect against buggy drivers */
write_lock(&mgr->vm_lock);
drm_mm_takedown(&mgr->vm_addr_space_mm);
write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
 
/**
* drm_vma_offset_lookup() - Find node in offset space
* @mgr: Manager object
* @start: Start address for object (page-based)
* @pages: Size of object (page-based)
*
* Find a node given a start address and object size. This returns the _best_
* match for the given range. That is, @start may point somewhere into a valid
* region and the containing node will be returned, as long as the node spans the
* whole requested area (given the size in number of pages as @pages).
*
* RETURNS:
* Returns NULL if no suitable node can be found. Otherwise, the best match
* is returned. It's the caller's responsibility to make sure the node doesn't
* get destroyed before the caller can access it.
*/
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_vma_offset_node *node;
 
read_lock(&mgr->vm_lock);
node = drm_vma_offset_lookup_locked(mgr, start, pages);
read_unlock(&mgr->vm_lock);
 
return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);
 
/**
* drm_vma_offset_lookup_locked() - Find node in offset space
* @mgr: Manager object
* @start: Start address for object (page-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
* manually. See drm_vma_offset_lock_lookup() for an example.
*
* RETURNS:
* Returns NULL if no suitable node can be found. Otherwise, the best match
* is returned.
*/
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_vma_offset_node *node, *best;
struct rb_node *iter;
unsigned long offset;
 
iter = mgr->vm_addr_space_rb.rb_node;
best = NULL;
 
while (likely(iter)) {
node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
offset = node->vm_node.start;
if (start >= offset) {
iter = iter->rb_right;
best = node;
if (start == offset)
break;
} else {
iter = iter->rb_left;
}
}
 
/* verify that the node spans the requested area */
if (best) {
offset = best->vm_node.start + best->vm_node.size;
if (offset < start + pages)
best = NULL;
}
 
return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
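 
#if 0
/*
 * Illustrative sketch (not part of the original file): the lookup-under-lock
 * pattern referred to above, assuming the drm_vma_offset_lock_lookup() /
 * drm_vma_offset_unlock_lookup() helpers from drm_vma_manager.h are available
 * in this port. The example_obj structure is hypothetical; real drivers embed
 * the node in their own buffer object and pin it before dropping the lock.
 */
struct example_obj {
	struct kref ref;
	struct drm_vma_offset_node vma_node;
};

static struct example_obj *example_find_obj(struct drm_vma_offset_manager *mgr,
					    unsigned long start,
					    unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct example_obj *obj = NULL;

	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	if (node) {
		obj = container_of(node, struct example_obj, vma_node);
		kref_get(&obj->ref); /* keep the object alive while still locked */
	}
	drm_vma_offset_unlock_lookup(mgr);

	return obj;
}
#endif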
 
/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node)
{
struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
struct rb_node *parent = NULL;
struct drm_vma_offset_node *iter_node;
 
while (likely(*iter)) {
parent = *iter;
iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
 
if (node->vm_node.start < iter_node->vm_node.start)
iter = &(*iter)->rb_left;
else if (node->vm_node.start > iter_node->vm_node.start)
iter = &(*iter)->rb_right;
else
BUG();
}
 
rb_link_node(&node->vm_rb, parent, iter);
rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}
 
/**
* drm_vma_offset_add() - Add offset node to manager
* @mgr: Manager object
* @node: Node to be added
* @pages: Allocation size visible to user-space (in number of pages)
*
* Add a node to the offset-manager. If the node was already added, this does
* nothing and returns 0. @pages is the size of the object given in number of
* pages.
* After this call succeeds, you can access the offset of the node until it
* is removed again.
*
* If this call fails, it is safe to retry the operation or call
* drm_vma_offset_remove(), anyway. However, no cleanup is required in that
* case.
*
* @pages is not required to be the same size as the underlying memory object
* that you want to map. It only limits the size that user-space can map into
* their address space.
*
* RETURNS:
* 0 on success, negative error code on failure.
*/
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node, unsigned long pages)
{
int ret;
 
write_lock(&mgr->vm_lock);
 
if (drm_mm_node_allocated(&node->vm_node)) {
ret = 0;
goto out_unlock;
}
 
ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
pages, 0, DRM_MM_SEARCH_DEFAULT);
if (ret)
goto out_unlock;
 
_drm_vma_offset_add_rb(mgr, node);
 
out_unlock:
write_unlock(&mgr->vm_lock);
return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
 
/**
* drm_vma_offset_remove() - Remove offset node from manager
* @mgr: Manager object
* @node: Node to be removed
*
* Remove a node from the offset manager. If the node wasn't added before, this
* does nothing. After this call returns, the offset and size will be 0 until a
* new offset is allocated via drm_vma_offset_add() again. Helper functions like
* drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
* offset is allocated.
*/
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node)
{
write_lock(&mgr->vm_lock);
 
if (drm_mm_node_allocated(&node->vm_node)) {
rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
drm_mm_remove_node(&node->vm_node);
memset(&node->vm_node, 0, sizeof(node->vm_node));
}
 
write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
 
/**
* drm_vma_node_allow - Add open-file to list of allowed users
* @node: Node to modify
* @filp: Open file to add
*
* Add @filp to the list of allowed open-files for this node. If @filp is
* already on this list, the ref-count is incremented.
*
* The list of allowed-users is preserved across drm_vma_offset_add() and
* drm_vma_offset_remove() calls. You may even call it if the node is currently
* not added to any offset-manager.
*
* You must remove all open-files the same number of times as you added them
* before destroying the node. Otherwise, you will leak memory.
*
* This is locked against concurrent access internally.
*
* RETURNS:
* 0 on success, negative error code on internal failure (out-of-mem)
*/
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
struct rb_node **iter;
struct rb_node *parent = NULL;
struct drm_vma_offset_file *new, *entry;
int ret = 0;
 
/* Preallocate entry to avoid atomic allocations below. It is quite
* unlikely that an open-file is added twice to a single node so we
* don't optimize for this case. OOM is checked below only if the entry
* is actually used. */
new = kmalloc(sizeof(*entry), GFP_KERNEL);
 
write_lock(&node->vm_lock);
 
iter = &node->vm_files.rb_node;
 
while (likely(*iter)) {
parent = *iter;
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
 
if (filp == entry->vm_filp) {
entry->vm_count++;
goto unlock;
} else if (filp > entry->vm_filp) {
iter = &(*iter)->rb_right;
} else {
iter = &(*iter)->rb_left;
}
}
 
if (!new) {
ret = -ENOMEM;
goto unlock;
}
 
new->vm_filp = filp;
new->vm_count = 1;
rb_link_node(&new->vm_rb, parent, iter);
rb_insert_color(&new->vm_rb, &node->vm_files);
new = NULL;
 
unlock:
write_unlock(&node->vm_lock);
kfree(new);
return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);
 
/**
* drm_vma_node_revoke - Remove open-file from list of allowed users
* @node: Node to modify
* @filp: Open file to remove
*
* Decrement the ref-count of @filp in the list of allowed open-files on @node.
* If the ref-count drops to zero, remove @filp from the list. You must call
* this once for every drm_vma_node_allow() on @filp.
*
* This is locked against concurrent access internally.
*
* If @filp is not on the list, nothing is done.
*/
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
 
write_lock(&node->vm_lock);
 
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
if (filp == entry->vm_filp) {
if (!--entry->vm_count) {
rb_erase(&entry->vm_rb, &node->vm_files);
kfree(entry);
}
break;
} else if (filp > entry->vm_filp) {
iter = iter->rb_right;
} else {
iter = iter->rb_left;
}
}
 
write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);
 
/**
* drm_vma_node_is_allowed - Check whether an open-file is granted access
* @node: Node to check
* @filp: Open-file to check for
*
* Search the list in @node whether @filp is currently on the list of allowed
* open-files (see drm_vma_node_allow()).
*
* This is locked against concurrent access internally.
*
* RETURNS:
* true iff @filp is on the list
*/
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
struct file *filp)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
 
read_lock(&node->vm_lock);
 
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
if (filp == entry->vm_filp)
break;
else if (filp > entry->vm_filp)
iter = iter->rb_right;
else
iter = iter->rb_left;
}
 
read_unlock(&node->vm_lock);
 
return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
/drivers/video/drm/hdmi.c
0,0 → 1,436
/*
* Copyright (C) 2012 Avionic Design GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/string.h>
 
static void hdmi_infoframe_checksum(void *buffer, size_t size)
{
u8 *ptr = buffer;
u8 csum = 0;
size_t i;
 
/*
 * HDMI 1.4, section 5.3.5: the checksum byte makes the sum of all
 * infoframe bytes, including the checksum itself, zero modulo 256.
 */
for (i = 0; i < size; i++)
csum += ptr[i];
 
ptr[3] = 256 - csum;
}
 
/**
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
* @frame: HDMI AVI infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
 
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = HDMI_AVI_INFOFRAME_SIZE;
 
return 0;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_init);
 
/**
* hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
* @frame: HDMI AVI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size)
{
u8 *ptr = buffer;
size_t length;
 
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
 
if (size < length)
return -ENOSPC;
 
memset(buffer, 0, size);
 
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
 
/* start infoframe payload */
ptr += HDMI_INFOFRAME_HEADER_SIZE;
 
ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
 
/*
* Data byte 1, bit 4 has to be set if we provide the active format
* aspect ratio
*/
if (frame->active_aspect & 0xf)
ptr[0] |= BIT(4);
 
/* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
if (frame->top_bar || frame->bottom_bar)
ptr[0] |= BIT(3);
 
if (frame->left_bar || frame->right_bar)
ptr[0] |= BIT(2);
 
ptr[1] = ((frame->colorimetry & 0x3) << 6) |
((frame->picture_aspect & 0x3) << 4) |
(frame->active_aspect & 0xf);
 
ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
((frame->quantization_range & 0x3) << 2) |
(frame->nups & 0x3);
 
if (frame->itc)
ptr[2] |= BIT(7);
 
ptr[3] = frame->video_code & 0x7f;
 
ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
((frame->content_type & 0x3) << 4) |
(frame->pixel_repeat & 0xf);
 
ptr[5] = frame->top_bar & 0xff;
ptr[6] = (frame->top_bar >> 8) & 0xff;
ptr[7] = frame->bottom_bar & 0xff;
ptr[8] = (frame->bottom_bar >> 8) & 0xff;
ptr[9] = frame->left_bar & 0xff;
ptr[10] = (frame->left_bar >> 8) & 0xff;
ptr[11] = frame->right_bar & 0xff;
ptr[12] = (frame->right_bar >> 8) & 0xff;
 
hdmi_infoframe_checksum(buffer, length);
 
return length;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
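 
#if 0
/*
 * Illustrative sketch (not part of the original file): initialise an AVI
 * infoframe and pack it for the hardware. The field values are arbitrary
 * example data; VIC 16 is 1080p60 in CEA-861.
 */
static ssize_t example_pack_avi(void *buffer, size_t size)
{
	struct hdmi_avi_infoframe frame;
	int ret;

	ret = hdmi_avi_infoframe_init(&frame);
	if (ret < 0)
		return ret;

	frame.colorspace = HDMI_COLORSPACE_RGB;
	frame.video_code = 16;

	/* Returns the number of bytes written, or a negative error code. */
	return hdmi_avi_infoframe_pack(&frame, buffer, size);
}
#endif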
 
/**
* hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
* @frame: HDMI SPD infoframe
* @vendor: vendor string
* @product: product string
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product)
{
memset(frame, 0, sizeof(*frame));
 
frame->type = HDMI_INFOFRAME_TYPE_SPD;
frame->version = 1;
frame->length = HDMI_SPD_INFOFRAME_SIZE;
 
strncpy(frame->vendor, vendor, sizeof(frame->vendor));
strncpy(frame->product, product, sizeof(frame->product));
 
return 0;
}
EXPORT_SYMBOL(hdmi_spd_infoframe_init);
 
/**
* hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
* @frame: HDMI SPD infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size)
{
u8 *ptr = buffer;
size_t length;
 
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
 
if (size < length)
return -ENOSPC;
 
memset(buffer, 0, size);
 
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
 
/* start infoframe payload */
ptr += HDMI_INFOFRAME_HEADER_SIZE;
 
memcpy(ptr, frame->vendor, sizeof(frame->vendor));
memcpy(ptr + 8, frame->product, sizeof(frame->product));
 
ptr[24] = frame->sdi;
 
hdmi_infoframe_checksum(buffer, length);
 
return length;
}
EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
 
/**
* hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
* @frame: HDMI audio infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
 
frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
frame->version = 1;
frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
 
return 0;
}
EXPORT_SYMBOL(hdmi_audio_infoframe_init);
 
/**
* hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
* @frame: HDMI audio infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size)
{
unsigned char channels;
u8 *ptr = buffer;
size_t length;
 
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
 
if (size < length)
return -ENOSPC;
 
memset(buffer, 0, size);
 
if (frame->channels >= 2)
channels = frame->channels - 1;
else
channels = 0;
 
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
 
/* start infoframe payload */
ptr += HDMI_INFOFRAME_HEADER_SIZE;
 
ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
(frame->sample_size & 0x3);
ptr[2] = frame->coding_type_ext & 0x1f;
ptr[3] = frame->channel_allocation;
ptr[4] = (frame->level_shift_value & 0xf) << 3;
 
if (frame->downmix_inhibit)
ptr[4] |= BIT(7);
 
hdmi_infoframe_checksum(buffer, length);
 
return length;
}
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
 
/**
* hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
* @frame: HDMI vendor infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
 
frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
frame->version = 1;
 
frame->oui = HDMI_IEEE_OUI;
 
/*
* 0 is a valid value for s3d_struct, so we use a special "not set"
* value
*/
frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
 
return 0;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
 
/**
* hdmi_vendor_infoframe_pack() - write an HDMI vendor infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
 
/* empty info frame */
if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
 
/* only one of those can be supplied */
if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
 
/* for side by side (half) we also need to provide 3D_Ext_Data */
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
frame->length = 6;
else
frame->length = 5;
 
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
 
if (size < length)
return -ENOSPC;
 
memset(buffer, 0, size);
 
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
 
/* HDMI OUI */
ptr[4] = 0x03;
ptr[5] = 0x0c;
ptr[6] = 0x00;
 
if (frame->vic) {
ptr[7] = 0x1 << 5; /* video format */
ptr[8] = frame->vic;
} else {
ptr[7] = 0x2 << 5; /* video format */
ptr[8] = (frame->s3d_struct & 0xf) << 4;
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
}
 
hdmi_infoframe_checksum(buffer, length);
 
return length;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
 
/*
* hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
*/
static ssize_t
hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
void *buffer, size_t size)
{
/* we only know about HDMI vendor infoframes */
if (frame->any.oui != HDMI_IEEE_OUI)
return -EINVAL;
 
return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
}
 
/**
* hdmi_infoframe_pack() - write an HDMI infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
* Packs the information contained in the @frame structure into a binary
* representation that can be written into the corresponding controller
* registers. Also computes the checksum as required by section 5.3.5 of
* the HDMI 1.4 specification.
*
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
{
ssize_t length;
 
switch (frame->any.type) {
case HDMI_INFOFRAME_TYPE_AVI:
length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_SPD:
length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
buffer, size);
break;
default:
WARN(1, "Bad infoframe type %d\n", frame->any.type);
length = -EINVAL;
}
 
return length;
}
EXPORT_SYMBOL(hdmi_infoframe_pack);
/drivers/video/drm/i2c/i2c-algo-bit.c
27,6 → 27,7
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <syscall.h>
#include <linux/jiffies.h>
#include <errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
43,7 → 44,7
} while (0)
#else
#define bit_dbg(level, dev, format, args...) \
do { /* dbgprintf(format, ##args); */ } while (0)
do {} while (0)
#endif /* DEBUG */
 
/* ----- global variables --------------------------------------------- */
/drivers/video/drm/i2c/i2c-core.c
32,6 → 32,7
#include <errno.h>
#include <linux/i2c.h>
#include <syscall.h>
#include <linux/jiffies.h>
 
 
 
104,7 → 105,7
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
pm_generic_runtime_idle
NULL
)
};
 
/drivers/video/drm/i915/Gtt/intel-agp.c
25,7 → 25,7
#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
 
 
int intel_gmch_probe(struct pci_dev *pdev,
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
 
int intel_agp_enabled;
62,7 → 62,8
 
bridge->capndx = cap_ptr;
 
if (intel_gmch_probe(pdev, bridge))
if (intel_gmch_probe(pdev, NULL, bridge))
 
{
// pci_set_drvdata(pdev, bridge);
// err = agp_add_bridge(bridge);
/drivers/video/drm/i915/Gtt/intel-gtt.c
363,6 → 363,40
intel_gtt_teardown_scratch_page();
}
 
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
 
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
intel_iommu_gfx_mapped)
return 1;
#endif
return 0;
}
 
static bool intel_gtt_can_wc(void)
{
if (INTEL_GTT_GEN <= 2)
return false;
 
if (INTEL_GTT_GEN >= 6)
return false;
 
/* Reports of major corruption with ILK vt'd enabled */
if (needs_ilk_vtd_wa())
return false;
 
return true;
}
 
static int intel_gtt_init(void)
{
u32 gma_addr;
/drivers/video/drm/i915/Makefile
0,0 → 1,124
 
 
CC = gcc.exe
FASM = e:/fasm/fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm \
-I./ -I./render
 
CFLAGS= -c -O2 $(INCLUDES) -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields
 
LIBPATH:= $(DDK_TOPDIR)/
 
LIBS:= -lddk -lcore -lgcc
 
LDFLAGS = -nostdlib -shared -s -Map i915.map --image-base 0\
--file-alignment 512 --section-alignment 4096
 
 
NAME:= i915
 
HFILES:= $(DRV_INCLUDES)/linux/types.h \
$(DRV_INCLUDES)/linux/list.h \
$(DRV_INCLUDES)/linux/pci.h \
$(DRV_INCLUDES)/drm/drm.h \
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
$(DRV_INCLUDES)/drm/drm_mode.h \
$(DRV_INCLUDES)/drm/drm_mm.h \
i915_drv.h \
bitmap.h
 
NAME_SRC= main.c \
pci.c \
dvo_ch7017.c \
dvo_ch7xxx.c \
dvo_ivch.c \
dvo_ns2501.c \
dvo_sil164.c \
dvo_tfp410.c \
i915_dma.c \
i915_drv.c \
i915_gem.c \
i915_gem_context.c \
i915_gem_gtt.c \
i915_gem_execbuffer.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_irq.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dvo.c \
intel_fb.c \
intel_hdmi.c \
intel_i2c.c \
intel_lvds.c \
intel_modes.c \
intel_opregion.c \
intel_panel.c \
intel_pm.c \
intel_ringbuffer.c \
intel_sdvo.c \
intel_sideband.c \
intel_sprite.c \
intel_uncore.c \
kms_display.c \
utils.c \
../hdmi.c \
Gtt/intel-agp.c \
Gtt/intel-gtt.c \
../drm_global.c \
../drm_drv.c \
../drm_vma_manager.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_rect.c \
$(DRM_TOPDIR)/drm_stub.c
 
SRC_DEP:=
 
 
NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
$(patsubst %.c, %.o, $(NAME_SRC))))
 
 
 
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) i915.lds Makefile
ld -L$(LIBPATH) $(LDFLAGS) -T i915.lds -o $@ $(NAME_OBJS) $(LIBS)
 
 
%.o : %.c $(HFILES) Makefile
$(CC) $(CFLAGS) $(DEFINES) -o $@ $<
 
%.o : %.S $(HFILES) Makefile
as -o $@ $<
 
 
clean:
-rm -f ../*/*.o
 
 
/drivers/video/drm/i915/Makefile.lto
0,0 → 1,123
 
CC = gcc
FASM = e:/fasm/fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm \
-I./ -I./render
 
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -mno-stack-arg-probe
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto
CFLAGS = -c $(INCLUDES) $(DEFINES) $(CFLAGS_OPT)
 
LIBPATH:= $(DDK_TOPDIR)
 
LIBS:= libddk.a libcore.a libgcc.a
 
LDFLAGS = -e,_drvEntry,-nostdlib,-shared,-s,--image-base,0,--file-alignment,512,--section-alignment,4096
 
 
NAME:= i915
 
HFILES:= $(DRV_INCLUDES)/linux/types.h \
$(DRV_INCLUDES)/linux/list.h \
$(DRV_INCLUDES)/linux/pci.h \
$(DRV_INCLUDES)/drm/drm.h \
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
$(DRV_INCLUDES)/drm/drm_mode.h \
$(DRV_INCLUDES)/drm/drm_mm.h \
i915_drv.h \
bitmap.h
 
NAME_SRC= main.c \
pci.c \
dvo_ch7017.c \
dvo_ch7xxx.c \
dvo_ivch.c \
dvo_ns2501.c \
dvo_sil164.c \
dvo_tfp410.c \
i915_dma.c \
i915_drv.c \
i915_gem.c \
i915_gem_context.c \
i915_gem_gtt.c \
i915_gem_execbuffer.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_irq.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dvo.c \
intel_fb.c \
intel_hdmi.c \
intel_i2c.c \
intel_lvds.c \
intel_modes.c \
intel_opregion.c \
intel_panel.c \
intel_pm.c \
intel_ringbuffer.c \
intel_sdvo.c \
intel_sideband.c \
intel_sprite.c \
intel_uncore.c \
kms_display.c \
utils.c \
../hdmi.c \
Gtt/intel-agp.c \
Gtt/intel-gtt.c \
../drm_global.c \
../drm_drv.c \
../drm_vma_manager.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_rect.c \
$(DRM_TOPDIR)/drm_stub.c
 
SRC_DEP:=
 
 
NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
$(patsubst %.c, %.o, $(NAME_SRC))))
 
 
 
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) i915.lds Makefile.lto
$(CC) $(CFLAGS_OPT) -fwhole-program -nostdlib -Wl,-L$(LIBPATH),$(LDFLAGS),-T,i915.lds -o $@ $(NAME_OBJS) $(LIBS)
 
 
%.o : %.c $(HFILES) Makefile.lto
$(CC) $(CFLAGS) $(DEFINES) -o $@ $<
 
%.o : %.S $(HFILES) Makefile.lto
as -o $@ $<
 
 
clean:
-rm -f ../*/*.o
 
 
/drivers/video/drm/i915/dvo_ch7xxx.c
32,6 → 32,7
#define CH7xxx_REG_DID 0x4b
 
#define CH7011_VID 0x83 /* 7010 as well */
#define CH7010B_VID 0x05
#define CH7009A_VID 0x84
#define CH7009B_VID 0x85
#define CH7301_VID 0x95
38,6 → 39,7
 
#define CH7xxx_VID 0x84
#define CH7xxx_DID 0x17
#define CH7010_DID 0x16
 
#define CH7xxx_NUM_REGS 0x4c
 
87,11 → 89,20
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
{ CH7010B_VID, "CH7010B" },
{ CH7009A_VID, "CH7009A" },
{ CH7009B_VID, "CH7009B" },
{ CH7301_VID, "CH7301" },
};
 
static struct ch7xxx_did_struct {
uint8_t did;
char *name;
} ch7xxx_dids[] = {
{ CH7xxx_DID, "CH7XXX" },
{ CH7010_DID, "CH7010B" },
};
 
struct ch7xxx_priv {
bool quiet;
};
108,6 → 119,18
return NULL;
}
 
static char *ch7xxx_get_did(uint8_t did)
{
int i;
 
for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
if (ch7xxx_dids[i].did == did)
return ch7xxx_dids[i].name;
}
 
return NULL;
}
 
/** Reads an 8 bit register */
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
179,7 → 202,7
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
uint8_t vendor, device;
char *name;
char *name, *devid;
 
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
if (ch7xxx == NULL)
204,7 → 227,8
if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
goto out;
 
if (device != CH7xxx_DID) {
devid = ch7xxx_get_did(device);
if (!devid) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
283,7 → 307,7
idf |= CH7xxx_IDF_HSP;
 
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
idf |= CH7xxx_IDF_HSP;
idf |= CH7xxx_IDF_VSP;
 
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
}
/drivers/video/drm/i915/i915_dma.c
45,7 → 45,6
 
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
 
 
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
#define BEGIN_LP_RING(n) \
1004,8 → 1003,7
value = 1;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
}
 
1150,8 → 1148,8
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
 
// intel_register_dsm_handler();
 
 
/* Initialise stolen first so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
1177,10 → 1175,8
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
if (INTEL_INFO(dev)->num_pipes == 0) {
dev_priv->mm.suspended = 0;
if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
}
 
ret = intel_fbdev_init(dev);
if (ret)
1206,9 → 1202,6
 
drm_kms_helper_poll_init(dev);
 
/* We're off and running w/KMS */
dev_priv->mm.suspended = 0;
 
return 0;
 
cleanup_gem:
1235,34 → 1228,22
{
const struct intel_device_info *info = dev_priv->info;
 
#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
#define DEV_INFO_SEP ,
#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
info->gen,
dev_priv->dev->pdev->device,
DEV_INFO_FLAGS);
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
 
/**
* intel_early_sanitize_regs - clean up BIOS state
* @dev: DRM device
*
* This function must be called before we do any I915_READ or I915_WRITE. Its
* purpose is to clean up any state left by the BIOS that may affect us when
* reading and/or writing registers.
*/
static void intel_early_sanitize_regs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_HASWELL(dev))
I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
 
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
1291,8 → 1272,33
dev_priv->dev = dev;
dev_priv->info = info;
 
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->backlight.lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
 
mutex_init(&dev_priv->pc8.lock);
dev_priv->pc8.requirements_met = false;
dev_priv->pc8.gpu_idle = false;
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 
i915_dump_device_info(dev_priv);
 
/* Not all pre-production machines fall into this category, only the
* very first ones. Almost everything should work, except for maybe
* suspend/resume. And we don't implement workarounds that affect only
* pre-production machines. */
if (IS_HSW_EARLY_SDV(dev))
DRM_INFO("This is an early pre-production Haswell machine. "
"It may not be fully functional.\n");
 
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
1318,8 → 1324,18
goto put_bridge;
}
 
intel_early_sanitize_regs(dev);
intel_uncore_early_sanitize(dev);
 
if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
* NB: We can't write IDICR yet because we do not have gt funcs
* set up */
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
 
ret = i915_gem_gtt_init(dev);
if (ret)
goto put_bridge;
1366,7 → 1382,9
intel_detect_pch(dev);
 
intel_irq_init(dev);
intel_gt_init(dev);
intel_pm_init(dev);
intel_uncore_sanitize(dev);
intel_uncore_init(dev);
 
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
1389,25 → 1407,10
* stuck interrupts on some machines.
*/
 
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
mutex_init(&dev_priv->dpio_lock);
 
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
 
dev_priv->num_plane = 1;
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
 
// ret = drm_vblank_init(dev, dev_priv->num_pipe);
// if (ret)
// goto out_gem_unload;
 
/* Start out suspended */
dev_priv->mm.suspended = 1;
 
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
1420,6 → 1423,8
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
 
main_device = dev;
 
return 0;
 
out_gem_unload:
1433,17 → 1438,11
// intel_teardown_mchbar(dev);
// destroy_workqueue(dev_priv->wq);
out_mtrrfree:
// if (dev_priv->mm.gtt_mtrr >= 0) {
// mtrr_del(dev_priv->mm.gtt_mtrr,
// dev_priv->mm.gtt_base_addr,
// aperture_size);
// dev_priv->mm.gtt_mtrr = -1;
// }
// io_mapping_free(dev_priv->mm.gtt_mapping);
// arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
// io_mapping_free(dev_priv->gtt.mappable);
// dev_priv->gtt.gtt_remove(dev);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
// dev_priv->gtt.gtt_remove(dev);
put_bridge:
// pci_dev_put(dev_priv->bridge_dev);
free_priv:
1460,9 → 1459,17
 
intel_gpu_ips_teardown();
 
if (HAS_POWER_WELL(dev)) {
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_set_power_well(dev, true);
i915_remove_power_well(dev);
}
 
i915_teardown_sysfs(dev);
 
if (dev_priv->mm.inactive_shrinker.shrink)
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
mutex_lock(&dev->struct_mutex);
1476,12 → 1483,7
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
io_mapping_free(dev_priv->gtt.mappable);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
dev_priv->gtt.mappable_base,
dev_priv->gtt.mappable_end);
dev_priv->mm.gtt_mtrr = -1;
}
arch_phys_wc_del(dev_priv->gtt.mtrr);
 
acpi_video_unregister();
 
1494,10 → 1496,10
* free the memory space allocated for the child device
* config parsed from VBT
*/
if (dev_priv->child_dev && dev_priv->child_dev_num) {
kfree(dev_priv->child_dev);
dev_priv->child_dev = NULL;
dev_priv->child_dev_num = 0;
if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
kfree(dev_priv->vbt.child_dev);
dev_priv->vbt.child_dev = NULL;
dev_priv->vbt.child_dev_num = 0;
}
 
vga_switcheroo_unregister_client(dev->pdev);
1530,6 → 1532,9
i915_free_hws(dev);
}
 
list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
drm_mm_takedown(&dev_priv->gtt.base.mm);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
 
1539,6 → 1544,8
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
 
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
 
1615,7 → 1622,7
kfree(file_priv);
}
 
struct drm_ioctl_desc i915_ioctls[] = {
const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
1622,7 → 1629,7
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1635,35 → 1642,35
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
 
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/drivers/video/drm/i915/i915_drv.c
47,10 → 47,6
 
int init_display_kms(struct drm_device *dev);
 
struct drm_device *main_device;
 
struct drm_file *drm_file_handlers[256];
 
static int i915_modeset __read_mostly = 1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
131,36 → 127,48
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
 
unsigned int i915_preliminary_hw_support __read_mostly = true;
int i915_enable_psr __read_mostly = 0;
module_param_named(enable_psr, i915_enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
 
unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support. (default: false)");
"Enable preliminary hardware support.");
 
int i915_disable_power_well __read_mostly = 0;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
"Disable the power well when possible (default: false)");
"Disable the power well when possible (default: true)");
 
int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
bool i915_fastboot __read_mostly = 0;
module_param_named(fastboot, i915_fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
"(default: false)");
 
int i915_enable_pc8 __read_mostly = 0;
module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
 
int i915_pc8_timeout __read_mostly = 5000;
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
 
bool i915_prefault_disable __read_mostly;
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
 
static struct drm_driver driver;
extern int intel_agp_enabled;
 
#define PCI_VENDOR_ID_INTEL 0x8086
 
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }
 
#define INTEL_QUANTA_VGA_DEVICE(info) { \
.class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = 0x16a, \
.subvendor = 0x152d, \
.subdevice = 0x8990, \
.driver_data = (unsigned long) info }
 
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
270,6 → 278,7
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
.has_fbc = 1,
};
 
static const struct intel_device_info intel_ivybridge_q_info = {
298,6 → 307,9
static const struct intel_device_info intel_haswell_d_info = {
GEN7_FEATURES,
.is_haswell = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_vebox_ring = 1,
};
 
static const struct intel_device_info intel_haswell_m_info = {
304,115 → 316,43
GEN7_FEATURES,
.is_haswell = 1,
.is_mobile = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
.has_vebox_ring = 1,
};
 
/*
* Make sure any device matches here are ordered from most specific to most
* general. For example, since the Quanta match is based on the subsystem
* and subvendor IDs, we need it to come before the more general IVB
* PCI ID matches, otherwise we'll use the wrong info struct above.
*/
#define INTEL_PCI_IDS \
INTEL_I915G_IDS(&intel_i915g_info), \
INTEL_I915GM_IDS(&intel_i915gm_info), \
INTEL_I945G_IDS(&intel_i945g_info), \
INTEL_I945GM_IDS(&intel_i945gm_info), \
INTEL_I965G_IDS(&intel_i965g_info), \
INTEL_G33_IDS(&intel_g33_info), \
INTEL_I965GM_IDS(&intel_i965gm_info), \
INTEL_GM45_IDS(&intel_gm45_info), \
INTEL_G45_IDS(&intel_g45_info), \
INTEL_PINEVIEW_IDS(&intel_pineview_info), \
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
INTEL_HSW_D_IDS(&intel_haswell_d_info), \
INTEL_HSW_M_IDS(&intel_haswell_m_info), \
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
INTEL_VLV_D_IDS(&intel_valleyview_d_info)
 
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
INTEL_PCI_IDS,
{0, 0, 0}
};
 
432,7 → 372,6
*/
if (INTEL_INFO(dev)->num_pipes == 0) {
dev_priv->pch_type = PCH_NOP;
dev_priv->num_pch_pll = 0;
return;
}
 
441,9 → 380,15
* make graphics device passthrough work easy for VMM, that only
* need to expose ISA bridge to let driver know the real hardware
* underneath. This is a requirement from virtualization team.
*
* In some virtualized environments (e.g. XEN), there is an irrelevant
* ISA bridge in the system. To work reliably, we should scan through
* all the ISA bridge devices and check for the first match, instead
* of only checking the first one.
*/
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
while (pch) {
struct pci_dev *curr = pch;
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
451,36 → 396,38
 
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
} else {
goto check_next;
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
break;
}
check_next:
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
// pci_dev_put(curr);
}
if (!pch)
DRM_DEBUG_KMS("No PCH found?\n");
}
 
bool i915_semaphore_is_enabled(struct drm_device *dev)
500,59 → 447,449
return 1;
}
 
#if 0
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_SUSPENDED;
mutex_unlock(&dev_priv->modeset_restore_lock);
 
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
hsw_disable_package_c8(dev_priv);
intel_set_power_well(dev, true);
 
drm_kms_helper_poll_disable(dev);
 
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent);
pci_save_state(dev->pdev);
 
int i915_init(void)
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int error;
 
mutex_lock(&dev->struct_mutex);
error = i915_gem_idle(dev);
mutex_unlock(&dev->struct_mutex);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
return error;
}
 
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 
drm_irq_uninstall(dev);
dev_priv->enable_hotplug_processing = false;
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc);
 
intel_modeset_suspend_hw(dev);
}
 
i915_save_state(dev);
 
intel_opregion_fini(dev);
 
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
console_unlock();
 
return 0;
}
 
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
static pci_dev_t device;
const struct pci_device_id *ent;
int err;
int error;
 
ent = find_pci_device(&device, pciidlist);
if( unlikely(ent == NULL) )
{
dbgprintf("device not found\n");
if (!dev || !dev->dev_private) {
DRM_ERROR("dev: %p\n", dev);
DRM_ERROR("DRM not initialized, aborting suspend.\n");
return -ENODEV;
};
}
 
if (state.event == PM_EVENT_PRETHAW)
return 0;
 
 
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
error = i915_drm_freeze(dev);
if (error)
return error;
 
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
 
return 0;
}
 
void intel_console_resume(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
console_resume_work);
struct drm_device *dev = dev_priv->dev;
 
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
}
 
static void intel_resume_hotplug(struct drm_device *dev)
{
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
 
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
 
mutex_unlock(&mode_config->mutex);
 
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
static int __i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
 
i915_restore_state(dev);
intel_opregion_setup(dev);
 
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_init_pch_refclk(dev);
 
mutex_lock(&dev->struct_mutex);
 
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
 
/* We need working interrupts for modeset enabling ... */
drm_irq_install(dev);
 
intel_modeset_init_hw(dev);
 
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
 
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
* bother with the tiny race here where we might lose hotplug
* notifications.
* */
intel_hpd_init(dev);
dev_priv->enable_hotplug_processing = true;
/* Config may have changed between suspend and resume */
intel_resume_hotplug(dev);
}
 
intel_opregion_init(dev);
 
/*
* The console lock can be pretty contended on resume due
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
if (console_trylock()) {
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
} else {
schedule_work(&dev_priv->console_resume_work);
}
 
/* Undo what we did at i915_drm_freeze so the refcount goes back to the
* expected level. */
hsw_enable_package_c8(dev_priv);
 
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
return error;
}
 
static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
 
intel_uncore_sanitize(dev);
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
 
__i915_drm_thaw(dev);
 
return error;
}
 
int i915_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
if (pci_enable_device(dev->pdev))
return -EIO;
 
pci_set_master(dev->pdev);
 
intel_uncore_sanitize(dev);
 
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need this since the BIOS might clear all our scratch PTEs.
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
!dev_priv->opregion.header) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
 
ret = __i915_drm_thaw(dev);
if (ret)
return ret;
 
drm_kms_helper_poll_enable(dev);
return 0;
}
 
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
* - re-init hardware status page
* - re-init ring buffer
* - re-init interrupt state
* - re-init display
*/
int i915_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool simulated;
int ret;
 
if (!i915_try_reset)
return 0;
 
mutex_lock(&dev->struct_mutex);
 
i915_gem_reset(dev);
 
simulated = dev_priv->gpu_error.stop_rings != 0;
 
if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
ret = -ENODEV;
} else {
ret = intel_gpu_reset(dev);
 
/* Also reset the gpu hangman. */
if (simulated) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
ret = 0;
}
} else
dev_priv->gpu_error.last_reset = get_seconds();
}
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
/* Ok, now get things going again... */
 
/*
* Everything depends on having the GTT running, so we need to start
* there. Fortunately we don't need to do this unless we reset the
* chip at a PCI level.
*
* Next we need to restore the context, but we don't use those
* yet either...
*
* Ring buffer needs to be re-initialized in the KMS case, or if X
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
struct intel_ring_buffer *ring;
int i;
 
dev_priv->ums.mm_suspended = 0;
 
i915_gem_init_swizzling(dev);
 
for_each_ring(ring, dev_priv, i)
ring->init(ring);
 
i915_gem_context_init(dev);
if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret)
i915_gem_cleanup_aliasing_ppgtt(dev);
}
 
/*
* It would make sense to re-init all the other hw state, at
* least the rps/rc6/emon init done within modeset_init_hw. For
* some unknown reason, this blows up my ilk, so don't.
*/
 
mutex_unlock(&dev->struct_mutex);
 
drm_irq_uninstall(dev);
drm_irq_install(dev);
intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
 
return 0;
}
 
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
 
if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
* functions have the same PCI-ID!
*/
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
 
/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
* implementation for gen3 (and only gen3) that used legacy drm maps
* (gasp!) to share buffers between X and the client. Hence we need to
* keep around the fake agp stuff for gen3, even when kms is enabled. */
if (intel_info->gen != 3) {
driver.driver_features &=
~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
} else if (!intel_agp_enabled) {
DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
return -ENODEV;
}
 
DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
device.pci_dev.device);
return drm_get_pci_dev(pdev, ent, &driver);
}
 
if (intel_info->gen != 3) {
static void
i915_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
 
} else if (init_agp() != 0) {
DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
drm_put_dev(dev);
}
 
static int i915_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int error;
 
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
 
err = drm_get_dev(&device.pci_dev, ent);
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
return err;
error = i915_drm_freeze(drm_dev);
if (error)
return error;
 
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
 
return 0;
}
 
static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_resume(drm_dev);
}
 
static int i915_pm_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
 
return i915_drm_freeze(drm_dev);
}
 
static int i915_pm_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_drm_thaw(drm_dev);
}
 
static int i915_pm_poweroff(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_drm_freeze(drm_dev);
}
 
#endif
 
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_MODESET,
// .load = i915_driver_load,
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER,
.load = i915_driver_load,
// .unload = i915_driver_unload,
.open = i915_driver_open,
// .lastclose = i915_driver_lastclose,
566,9 → 903,12
// .device_is_agp = i915_driver_device_is_agp,
// .master_create = i915_master_create,
// .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
#endif
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
// .gem_vm_ops = &i915_gem_vm_ops,
 
// .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
// .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
589,85 → 929,39
};
 
 
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static struct drm_device drm_dev;
static struct drm_file drm_file;
 
struct drm_device *dev;
struct drm_file *priv;
 
int ret;
int i915_init(void)
{
static pci_dev_t device;
const struct pci_device_id *ent;
int err;
 
dev = &drm_dev;
priv = &drm_file;
ent = find_pci_device(&device, pciidlist);
if( unlikely(ent == NULL) )
{
dbgprintf("device not found\n");
return -ENODEV;
};
 
drm_file_handlers[0] = priv;
drm_core_init();
 
// ret = pci_enable_device(pdev);
// if (ret)
// goto err_g1;
DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
device.pci_dev.device);
/*
if (intel_info->gen != 3) {
 
pci_set_master(pdev);
} else if (init_agp() != 0) {
DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
return -ENODEV;
}
*/
err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
 
// if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
// printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
// goto err_g2;
// }
 
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
 
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
 
spin_lock_init(&dev->count_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
 
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
 
idr_init(&priv->object_idr);
spin_lock_init(&priv->table_lock);
 
dev->driver = &driver;
 
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
goto err_g4;
return err;
}
 
ret = i915_driver_load(dev, ent->driver_data );
 
if (ret)
goto err_g4;
 
ret = init_display_kms(dev);
 
if (ret)
goto err_g4;
 
return 0;
 
err_g4:
//err_g3:
// if (drm_core_check_feature(dev, DRIVER_MODESET))
// drm_put_minor(&dev->control);
//err_g2:
// pci_disable_device(pdev);
//err_g1:
 
return ret;
}
 
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
777,9 → 1071,9
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
* chip from rc6 before touching it for real. MI_MODE is masked, hence
* harmless to write 0 into. */
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
I915_WRITE_NOTRACE(MI_MODE, 0);
}
 
786,7 → 1080,7
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
if (IS_HASWELL(dev_priv->dev) &&
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
reg);
797,7 → 1091,7
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
if (IS_HASWELL(dev_priv->dev) &&
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed write to %x\n", reg);
I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
804,50 → 1098,3
}
}
 
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
return val; \
}
 
__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read
 
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
write##y(val, dev_priv->regs + reg); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
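/*
 * A rough sketch of how the generated accessors above get used; the register
 * offset here is arbitrary and the wrapper macros (I915_READ/I915_WRITE and
 * friends) that normally sit on top of these helpers live elsewhere in the
 * headers:
 *
 *	u32 v = i915_read32(dev_priv, 0x2030);	// takes forcewake if the
 *						// register needs it
 *	i915_write32(dev_priv, 0x2030, v | 1);	// drains the GT FIFO and runs
 *						// the unclaimed-register checks
 */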
/drivers/video/drm/i915/i915_drv.h
88,6 → 88,8
};
#define plane_name(p) ((p) + 'A')
 
#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
 
enum port {
PORT_A = 0,
PORT_B,
98,6 → 100,24
};
#define port_name(p) ((p) + 'A')
 
enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_PIPE_C,
POWER_DOMAIN_PIPE_A_PANEL_FITTER,
POWER_DOMAIN_PIPE_B_PANEL_FITTER,
POWER_DOMAIN_PIPE_C_PANEL_FITTER,
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
};
 
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
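/*
 * Illustration (assuming the usual enum pipe / enum transcoder numbering,
 * PIPE_A == 0 and TRANSCODER_A == 0, defined elsewhere in this header):
 * POWER_DOMAIN_PIPE(PIPE_B) evaluates to POWER_DOMAIN_PIPE_B, and
 * POWER_DOMAIN_TRANSCODER(TRANSCODER_C) to POWER_DOMAIN_TRANSCODER_C.
 */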
 
enum hpd_pin {
HPD_NONE = 0,
HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
124,15 → 144,41
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
 
struct intel_pch_pll {
struct drm_i915_private;
 
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
DPLL_ID_PCH_PLL_A,
DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2
 
struct intel_dpll_hw_state {
uint32_t dpll;
uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
};
 
struct intel_shared_dpll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
int pll_reg;
int fp0_reg;
int fp1_reg;
const char *name;
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
void (*mode_set)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*enable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*disable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
bool (*get_hw_state)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
};
#define I915_NUM_PLLS 2
 
/* Used by dp and fdi links */
struct intel_link_m_n {
167,7 → 213,6
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
 
#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
#define WATCH_GTT 0
 
187,7 → 232,6
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;
 
struct intel_opregion {
struct opregion_header __iomem *header;
290,8 → 334,8
u32 purgeable:1;
s32 ring:4;
u32 cache_level:2;
} *active_bo, *pinned_bo;
u32 active_bo_count, pinned_bo_count;
} **active_bo, **pinned_bo;
u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
};
298,6 → 342,8
 
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
 
struct drm_i915_display_funcs {
bool (*fbc_enabled)(struct drm_device *dev);
305,16 → 351,35
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
/**
* find_dpll() - Find the best values for the PLL
* @limit: limits for the PLL
* @crtc: current CRTC
* @target: target frequency in kHz
* @refclk: reference clock frequency in kHz
* @match_clock: if provided, @best_clock P divider must
* match the P divider from @match_clock
* used for LVDS downclocking
* @best_clock: best PLL values found
*
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
struct drm_crtc *crtc,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
327,7 → 392,8
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *obj,
uint32_t flags);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
338,73 → 404,122
/* pll clock increase/decrease */
};
 
struct drm_i915_gt_funcs {
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
 
#define DEV_INFO_FLAGS \
DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_llc)
struct intel_uncore {
spinlock_t lock; /** lock is also taken in irq contexts. */
 
struct intel_uncore_funcs funcs;
 
unsigned fifo_count;
unsigned forcewake_count;
};
 
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
func(is_i915g) sep \
func(is_i945gm) sep \
func(is_g33) sep \
func(need_gfx_hws) sep \
func(is_g4x) sep \
func(is_pineview) sep \
func(is_broadwater) sep \
func(is_crestline) sep \
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
func(has_force_wake) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
func(has_hotplug) sep \
func(cursor_needs_physical) sep \
func(has_overlay) sep \
func(overlay_needs_physical) sep \
func(supports_tv) sep \
func(has_bsd_ring) sep \
func(has_blt_ring) sep \
func(has_vebox_ring) sep \
func(has_llc) sep \
func(has_ddi) sep \
func(has_fpga_dbg)
 
#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
 
struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes:3;
u8 gen;
u8 is_mobile:1;
u8 is_i85x:1;
u8 is_i915g:1;
u8 is_i945gm:1;
u8 is_g33:1;
u8 need_gfx_hws:1;
u8 is_g4x:1;
u8 is_pineview:1;
u8 is_broadwater:1;
u8 is_crestline:1;
u8 is_ivybridge:1;
u8 is_valleyview:1;
u8 has_force_wake:1;
u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
u8 has_hotplug:1;
u8 cursor_needs_physical:1;
u8 has_overlay:1;
u8 overlay_needs_physical:1;
u8 supports_tv:1;
u8 has_bsd_ring:1;
u8 has_blt_ring:1;
u8 has_llc:1;
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
 
#undef DEFINE_FLAG
#undef SEP_SEMICOLON
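/*
 * For readability, this is roughly what the X-macro line inside
 * intel_device_info expands to (abbreviated; one bitfield per flag listed
 * in DEV_INFO_FOR_EACH_FLAG above):
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	u8 is_i915g:1;
 *	...
 *	u8 has_ddi:1;
 *	u8 has_fpga_dbg:1;
 */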
 
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
caches, e.g. sampler/render caches, and the
large Last-Level-Cache. LLC is coherent with
the CPU, but L3 is only visible to the GPU. */
I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
 
typedef uint32_t gen6_gtt_pte_t;
 
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
struct list_head global_link;
unsigned long start; /* Start offset always 0 for dri2 */
size_t total; /* size addr space maps (ex. 2GB for ggtt) */
 
struct {
dma_addr_t addr;
struct page *page;
} scratch;
 
/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
 
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;
 
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level);
void (*clear_range)(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level cache_level);
void (*cleanup)(struct i915_address_space *vm);
};
 
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
413,8 → 528,7
* the spec.
*/
struct i915_gtt {
unsigned long start; /* Start offset of used GTT */
size_t total; /* Total size GTT can map */
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
 
unsigned long mappable_end; /* End offset that we can CPU map */
425,60 → 539,90
void __iomem *gsm;
 
bool do_idle_maps;
dma_addr_t scratch_page_dma;
struct page *scratch_page;
 
int mtrr;
 
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
void (*gtt_remove)(struct drm_device *dev);
void (*gtt_clear_range)(struct drm_device *dev,
unsigned int first_entry,
unsigned int num_entries);
void (*gtt_insert_entries)(struct drm_device *dev,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
};
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
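/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): a 2 GiB
 * global GTT, base.total == 0x80000000, gives gtt_total_entries() == 524288
 * page table entries.
 */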
 
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
struct drm_device *dev;
struct i915_address_space base;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
 
/* pte functions, mirroring the interface of the global gtt. */
void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
unsigned int first_entry,
unsigned int num_entries);
void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
int (*enable)(struct drm_device *dev);
void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
 
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
* object into/from the address space.
*
* To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
* will always be <= an object's lifetime. So object refcounting should cover us.
*/
struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
 
/** This object's place on the active/inactive lists */
struct list_head mm_list;
 
struct list_head vma_link; /* Link in the object's VMA list */
 
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
 
};
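/*
 * As the fields above suggest, one GEM object may be mapped into several
 * address spaces at once: each (obj, vm) pairing is tracked by its own
 * i915_vma, linked into the object's vma_list via vma_link and onto the
 * vm's active/inactive lists via mm_list.
 */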
 
struct i915_ctx_hang_stats {
/* This context had batch pending when hang was declared */
unsigned batch_pending;
 
/* This context had batch active when hang was declared */
unsigned batch_active;
};
 
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
struct kref ref;
int id;
bool is_initialized;
struct drm_i915_file_private *file_priv;
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
};
 
struct i915_fbc {
unsigned long size;
unsigned int fb_id;
enum plane plane;
int y;
 
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
 
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int interval;
} *fbc_work;
 
enum no_fbc_reason {
FBC_OK, /* FBC is enabled */
FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
FBC_MODE_TOO_LARGE, /* mode too large for compression */
FBC_BAD_PLANE, /* fbc not supported on plane */
485,8 → 629,23
FBC_NOT_TILED, /* buffer not tiled */
FBC_MULTIPLE_PIPES, /* more than one pipe active */
FBC_MODULE_PARAM,
FBC_CHIP_DEFAULT, /* disabled by default on this chip */
} no_fbc_reason;
};
 
enum no_psr_reason {
PSR_NO_SOURCE, /* Not supported on platform */
PSR_NO_SINK, /* Not supported by panel */
PSR_MODULE_PARAM,
PSR_CRTC_NOT_ACTIVE,
PSR_PWR_WELL_ENABLED,
PSR_NOT_TILED,
PSR_SPRITE_ENABLED,
PSR_S3D_ENABLED,
PSR_INTERLACED_ENABLED,
PSR_HSW_NOT_DDIA,
};
 
enum intel_pch {
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
503,6 → 662,7
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
 
struct intel_fbdev;
struct intel_fbc_work;
669,17 → 829,19
};
 
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protects the work_struct and
* pm_iir. */
spinlock_t lock;
 
/* On vlv we need to manually drop to Vmin with a delayed work. */
struct delayed_work vlv_work;
 
/* The below variables and all the rps hw state are protected by
* dev->struct_mutex. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 rpe_delay;
u8 hw_max;
 
struct delayed_work delayed_resume_work;
716,6 → 878,15
struct drm_i915_gem_object *renderctx;
};
 
/* Power well structure for haswell */
struct i915_power_well {
struct drm_device *device;
spinlock_t lock;
/* power well enable/disable usage count */
int count;
int i915_request;
};
 
struct i915_dri1_state {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
729,6 → 900,18
uint32_t counter;
};
 
struct i915_ums_state {
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
* This is set between LeaveVT and EnterVT. It needs to be
* replaced with a semaphore. It also needs to be
* transitioned away from for kernel modesetting.
*/
int mm_suspended;
};
 
struct intel_l3_parity {
u32 *remap_info;
struct work_struct error_work;
737,8 → 920,6
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
752,36 → 933,11
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
 
int gtt_mtrr;
 
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
 
bool shrinker_no_lock_stealing;
 
/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
 
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;
 
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
 
800,16 → 956,6
*/
bool interruptible;
 
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
* This is set between LeaveVT and EnterVT. It needs to be
* replaced with a semaphore. It also needs to be
* transitioned away from for kernel modesetting.
*/
int suspended;
 
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
819,18 → 965,30
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
size_t object_memory;
u32 object_count;
};
 
struct drm_i915_error_state_buf {
unsigned bytes;
unsigned size;
int err;
u8 *buf;
loff_t start;
loff_t pos;
};
 
struct i915_error_state_file_priv {
struct drm_device *dev;
struct drm_i915_error_state *error;
};
 
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
/* For reset and error_state handling. */
spinlock_t lock;
886,6 → 1044,119
MODESET_SUSPENDED,
};
 
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
/* Feature bits */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 
/* eDP */
int edp_rate;
int edp_lanes;
int edp_preemphasis;
int edp_vswing;
bool edp_initialized;
bool edp_support;
int edp_bpp;
struct edp_power_seq edp_pps;
 
int crt_ddc_pin;
 
int child_dev_num;
struct child_device_config *child_dev;
};
 
enum intel_ddb_partitioning {
INTEL_DDB_PART_1_2,
INTEL_DDB_PART_5_6, /* IVB+ */
};
 
struct intel_wm_level {
bool enable;
uint32_t pri_val;
uint32_t spr_val;
uint32_t cur_val;
uint32_t fbc_val;
};
 
/*
* This struct tracks the state needed for the Package C8+ feature.
*
* Package states C8 and deeper are really deep PC states that can only be
* reached when all the devices on the system allow it, so even if the graphics
* device allows PC8+, it doesn't mean the system will actually get to these
* states.
*
* Our driver only allows PC8+ when all the outputs are disabled, the power well
* is disabled and the GPU is idle. When these conditions are met, we manually
* do the other conditions: disable the interrupts, clocks and switch LCPLL
* refclk to Fclk.
*
* When we really reach PC8 or deeper states (not just when we allow it) we lose
* the state of some registers, so when we come back from PC8+ we need to
* restore this state. We don't get into PC8+ if we're not in RC6, so we don't
* need to take care of the registers kept by RC6.
*
* The interrupt disabling is part of the requirements. We can only leave the
* PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
* can lock the machine.
*
* Ideally every piece of our code that needs PC8+ disabled would call
* hsw_disable_package_c8, which would increment disable_count and prevent the
* system from reaching PC8+. But we don't have a symmetric way to do this for
* everything, so we have the requirements_met and gpu_idle variables. When we
* switch requirements_met or gpu_idle to true we decrease disable_count, and
* increase it in the opposite case. The requirements_met variable is true when
* all the CRTCs, encoders and the power well are disabled. The gpu_idle
* variable is true when the GPU is idle.
*
* In addition to everything, we only actually enable PC8+ if disable_count
* stays at zero for at least some seconds. This is implemented with the
* enable_work variable. We do this so we don't enable/disable PC8 dozens of
* consecutive times when all screens are disabled and some background app
* queries the state of our connectors, or we have some application constantly
* waking up to use the GPU. Only after the enable_work function actually
* enables PC8+ the "enable" variable will become true, which means that it can
* be false even if disable_count is 0.
*
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
* goes back to false exactly before we reenable the IRQs. We use this variable
* to check if someone is trying to enable/disable IRQs while they're supposed
* to be disabled. This shouldn't happen and we'll print some error messages in
* case it happens, but if it actually happens we'll also update the variables
* inside struct regsave so when we restore the IRQs they will contain the
* latest expected values.
*
* For more, read "Display Sequences for Package C8" on our documentation.
*/
struct i915_package_c8 {
bool requirements_met;
bool gpu_idle;
bool irqs_disabled;
/* Only true after the delayed work task actually enables it. */
bool enabled;
int disable_count;
struct mutex lock;
struct delayed_work enable_work;
 
struct {
uint32_t deimr;
uint32_t sdeimr;
uint32_t gtimr;
uint32_t gtier;
uint32_t gen6_pmimr;
} regsave;
};
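/*
 * Sketch of the reference-count style usage described above, as used by the
 * freeze/thaw paths elsewhere in this driver (error handling omitted):
 *
 *	hsw_disable_package_c8(dev_priv);	// raise pc8.disable_count,
 *						// blocking PC8+ entry
 *	... touch registers that must not race with PC8+ ...
 *	hsw_enable_package_c8(dev_priv);	// drop the reference again
 */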
 
typedef struct drm_i915_private {
struct drm_device *dev;
 
895,14 → 1166,7
 
void __iomem *regs;
 
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
spinlock_t gt_lock;
struct intel_uncore uncore;
 
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
939,6 → 1203,7
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask;
u32 gt_irq_mask;
u32 pm_irq_mask;
 
struct work_struct hotplug_work;
bool enable_hotplug_processing;
951,17 → 1216,13
HPD_MARK_DISABLED = 2
} hpd_mark;
} hpd_stats[HPD_NUM_PINS];
u32 hpd_event_bits;
 
int num_pch_pll;
int num_plane;
 
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
struct intel_fbc_work *fbc_work;
 
struct i915_fbc fbc;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
 
/* overlay */
struct intel_overlay *overlay;
971,37 → 1232,13
struct {
int level;
bool enabled;
spinlock_t lock; /* bl registers and the above bl fields */
struct backlight_device *device;
} backlight;
 
/* LVDS info */
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
 
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
 
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1008,6 → 1245,13
 
unsigned int fsb_freq, mem_freq, is_ddr3;
 
/**
* wq - Driver workqueue for GEM.
*
* NOTE: Work items scheduled here are not allowed to grab any modeset
* locks, for otherwise the flushing done in the pageflip code will
* result in deadlocks.
*/
struct workqueue_struct *wq;
 
/* Display functions */
1022,7 → 1266,8
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
 
struct i915_gtt gtt;
struct list_head vm_list; /* Global list of all address spaces */
struct i915_gtt gtt; /* VMA representing the global address space */
 
struct i915_gem_mm mm;
 
1029,16 → 1274,13
/* Kernel Modesetting */
 
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
 
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
 
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
 
/* Reclocking support */
1047,13 → 1289,14
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
 
bool mchbar_need_disable;
 
struct intel_l3_parity l3_parity;
 
/* Cannot be determined by PCIID. You must always read a register. */
size_t ellc_size;
 
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
 
1061,13 → 1304,15
* mchdev_lock in intel_pm.c */
struct intel_ilk_power_mgmt ips;
 
enum no_fbc_reason no_fbc_reason;
/* Haswell power well */
struct i915_power_well power_well;
 
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
enum no_psr_reason no_psr_reason;
 
struct i915_gpu_error gpu_error;
 
struct drm_i915_gem_object *vlv_pctx;
 
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
 
1087,11 → 1332,34
 
struct i915_suspend_saved_registers regfile;
 
struct {
/*
* Raw watermark latency values:
* in 0.1us units for WM0,
* in 0.5us units for WM1+.
*/
/* primary */
uint16_t pri_latency[5];
/* sprite */
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
} wm;
 
struct i915_package_c8 pc8;
 
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
/* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums;
} drm_i915_private_t;
 
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return dev->dev_private;
}
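/* to_i915() replaces the open-coded (struct drm_i915_private *)dev->dev_private
 * casts; INTEL_INFO() and INTEL_PCH_TYPE() below are rewritten in terms of it. */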
 
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
1104,7 → 1372,7
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
 
#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
#define I915_GTT_OFFSET_NONE ((u32)-1)
 
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
1129,15 → 1397,16
 
const struct drm_i915_gem_object_ops *ops;
 
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
/** List of VMAs backed by this object */
struct list_head vma_list;
 
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head gtt_list;
struct list_head global_list;
 
/** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
 
1204,6 → 1473,7
*/
unsigned int fault_mappable:1;
unsigned int pin_mappable:1;
unsigned int pin_display:1;
 
/*
* Is the GPU currently using a fence to access this buffer,
1211,7 → 1481,7
unsigned int pending_fenced_gpu_access:1;
unsigned int fenced_gpu_access:1;
 
unsigned int cache_level:2;
unsigned int cache_level:3;
 
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
1231,13 → 1501,6
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
 
/**
* Current offset of the object in GTT space.
*
* This is the same as gtt_space->start
*/
uint32_t gtt_offset;
 
struct intel_ring_buffer *ring;
 
/** Breadcrumb of last rendering to the buffer. */
1280,9 → 1543,18
/** GEM sequence number associated with this request. */
uint32_t seqno;
 
/** Position in the ringbuffer of the end of the request */
/** Position in the ringbuffer of the start of the request */
u32 head;
 
/** Position in the ringbuffer of the end of the request */
u32 tail;
 
/** Context related to this request */
struct i915_hw_context *ctx;
 
/** Batch buffer related to this request if any */
struct drm_i915_gem_object *batch_obj;
 
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
 
1300,9 → 1572,11
struct list_head request_list;
} mm;
struct idr context_idr;
 
struct i915_ctx_hang_stats hang_stats;
};
 
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
#define INTEL_INFO(dev) (to_i915(dev)->info)
 
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1320,7 → 1594,6
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
1332,6 → 1605,8
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0A00)
 
1350,7 → 1625,9
 
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
1373,17 → 1650,16
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define HAS_IPS(dev) (IS_ULT(dev))
 
#define HAS_DDI(dev) (IS_HASWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
 
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1392,7 → 1668,7
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
 
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1440,8 → 1716,14
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
extern bool i915_fastboot __read_mostly;
extern int i915_enable_pc8 __read_mostly;
extern int i915_pc8_timeout __read_mostly;
extern bool i915_prefault_disable __read_mostly;
 
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1472,17 → 1754,22
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 
extern void intel_console_resume(struct work_struct *work);
 
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
 
extern void intel_irq_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
 
void i915_error_state_free(struct kref *error_ref);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
 
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1490,15 → 1777,6
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
void intel_enable_asle(struct drm_device *dev);
 
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
#else
#define i915_destroy_error_state(x)
#endif
 
 
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
1555,13 → 1833,18
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
void i915_gem_vma_destroy(struct i915_vma *vma);
 
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
1598,8 → 1881,6
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
1630,6 → 1911,7
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
}
1650,10 → 1932,7
}
 
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
1662,9 → 1941,12
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int i915_add_request(struct intel_ring_buffer *ring,
int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
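/* Convenience wrapper: passes NULL for the file and the batch object, for
 * requests that are not tied to an execbuffer (e.g. flush-only requests). */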
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1677,6 → 1959,7
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int id,
1695,6 → 1978,8
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
 
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
 
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
1701,6 → 1986,56
 
void i915_gem_restore_fences(struct drm_device *dev);
 
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
struct i915_address_space *ggtt =
&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
return vm == ggtt;
}
 
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}
 
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}
 
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}
 
static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
{
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
map_and_fenceable, nonblocking);
}
#undef obj_to_ggtt
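/* Minimal usage sketch (illustrative only, not part of this change): pin a
 * buffer into the global GTT and query its offset, mirroring the calls made
 * in i915_gem.c below:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
 *	if (ret == 0)
 *		offset = i915_gem_obj_ggtt_offset(obj);
 */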
 
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
1707,6 → 2042,21
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
kref_get(&ctx->ref);
}
 
static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
kref_put(&ctx->ref, i915_gem_context_free);
}
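/* Context lifetime is now kref-based: __i915_add_request() takes a reference
 * on the request's ctx and i915_gem_free_request() drops it again. */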
 
struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
1738,7 → 2088,9
 
 
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
int __must_check i915_gem_evict_something(struct drm_device *dev,
struct i915_address_space *vm,
int min_size,
unsigned alignment,
unsigned cache_level,
bool mappable,
1760,7 → 2112,7
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
/* i915_gem_tiling.c */
inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 
1773,22 → 2125,37
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
int handle);
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
 
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
 
/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
struct drm_i915_error_state_buf *eb)
{
kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
 
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
 
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
1804,7 → 2171,7
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern inline bool intel_gmbus_is_port_valid(unsigned port)
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}
1813,7 → 2180,7
struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
1825,14 → 2192,10
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern void intel_opregion_gse_intr(struct drm_device *dev);
extern void intel_opregion_enable_asle(struct drm_device *dev);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
#endif
 
/* intel_acpi.c */
1846,6 → 2209,7
 
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
1858,6 → 2222,9
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
1869,10 → 2236,11
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error);
 
extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct seq_file *m,
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
#endif
1883,46 → 2251,55
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination);
 
__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
 
#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read
 
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
 
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
 
#define I915_READ8(reg) i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
 
#define I915_READ16(reg) i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
 
#define I915_READ(reg) i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
 
#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg) i915_read64(dev_priv, (reg))
#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
 
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
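/* The register accessors now take a trailing bool selecting traced vs. untraced
 * access; the *_NOTRACE and POSTING_READ* variants call the same functions with
 * trace=false instead of poking dev_priv->regs directly. */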
/drivers/video/drm/i915/i915_gem.c
71,8 → 71,11
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
 
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
bool force);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking);
88,9 → 91,23
bool enable);
 
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
return HAS_LLC(dev) || level != I915_CACHE_NONE;
}
 
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
return true;
 
return obj->pin_display;
}
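/* A CPU write needs a clflush either when the cache is not coherent with the
 * GPU for this cache level, or when the object is pinned for display scanout
 * (the display engine is not coherent with the LLC cache). */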
 
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
if (obj->tiling_mode)
107,15 → 124,19
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
dev_priv->mm.object_memory += size;
spin_unlock(&dev_priv->mm.object_stat_lock);
}
 
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
size_t size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
dev_priv->mm.object_memory -= size;
spin_unlock(&dev_priv->mm.object_stat_lock);
}
 
static int
168,7 → 189,7
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
return obj->gtt_space && !obj->active;
return i915_gem_obj_bound_any(obj) && !obj->active;
}
 
 
213,12 → 234,12
 
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
pinned += obj->gtt_space->size;
pinned += i915_gem_obj_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
 
args->aper_size = dev_priv->gtt.total;
args->aper_size = dev_priv->gtt.base.total;
args->aper_available_size = args->aper_size - pinned;
 
return 0;
256,17 → 277,11
return -ENOMEM;
 
ret = drm_gem_handle_create(file, &obj->base, &handle);
if (ret) {
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
kfree(obj);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
if (ret)
return ret;
}
 
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
 
*handle_p = handle;
return 0;
}
283,13 → 298,6
args->size, &args->handle);
}
 
int i915_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle)
{
return drm_gem_handle_delete(file, handle);
}
 
/**
* Creates a new mm object and returns a handle to it.
*/
460,9 → 468,8
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
if (obj->gtt_space) {
needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
return ret;
505,7 → 512,7
 
mutex_unlock(&dev->struct_mutex);
 
if (!prefaulted) {
if (likely(!i915_prefault_disable) && !prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
637,7 → 644,7
int page_offset, page_length, ret;
char *vaddr;
 
ret = i915_gem_object_pin(obj, 0, true, true);
ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
if (ret)
goto out;
 
659,7 → 666,7
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
 
offset = obj->gtt_offset + args->offset;
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
while (remain > 0) {
/* Operation in this page
786,19 → 793,18
* write domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
if (obj->gtt_space) {
needs_clflush_after = cpu_write_needs_clflush(obj);
if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
}
}
/* Same trick applies for invalidate partially written cachelines before
* writing. */
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
&& obj->cache_level == I915_CACHE_NONE)
needs_clflush_before = 1;
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
needs_clflush_before =
!cpu_cache_is_coherent(dev, obj->cache_level);
 
ret = i915_gem_object_get_pages(obj);
if (ret)
877,7 → 883,7
*/
if (!needs_clflush_after &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
if (i915_gem_clflush_object(obj, obj->pin_display))
i915_gem_chipset_flush(dev);
}
}
901,6 → 907,9
struct drm_i915_gem_object *obj;
int ret;
 
if (args->size == 0)
return 0;
 
if(args->handle == -2)
{
printf("%s handle %d\n", __FUNCTION__, args->handle);
907,9 → 916,6
return 0;
}
 
if (args->size == 0)
return 0;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
949,9 → 955,9
// goto out;
// }
 
if (obj->cache_level == I915_CACHE_NONE &&
obj->tiling_mode == I915_TILING_NONE &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
if (obj->tiling_mode == I915_TILING_NONE &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
1001,7 → 1007,7
 
ret = 0;
if (seqno == ring->outstanding_lazy_request)
ret = i915_add_request(ring, NULL, NULL);
ret = i915_add_request(ring, NULL);
 
return ret;
}
1035,6 → 1041,8
bool wait_forever = true;
int ret;
 
WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
 
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
 
1045,7 → 1053,7
wait_forever = false;
}
 
timeout_jiffies = timespec_to_jiffies(&wait_time);
timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
 
if (WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
1130,6 → 1138,25
interruptible, NULL);
}
 
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
i915_gem_retire_requests_ring(ring);
 
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*
* Note that the last_write_seqno is always the earlier of
* the two (read/write) seqnos, so if we have successfully waited,
* we know we have passed the last write.
*/
obj->last_write_seqno = 0;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 
return 0;
}
 
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
1150,20 → 1177,9
if (ret)
return ret;
 
i915_gem_retire_requests_ring(ring);
 
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*/
if (obj->last_write_seqno &&
i915_seqno_passed(seqno, obj->last_write_seqno)) {
obj->last_write_seqno = 0;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
return i915_gem_object_wait_rendering__tail(obj, ring);
}
 
return 0;
}
 
/* A nonblocking variant of the above wait. This is a highly dangerous routine
* as the object state may change during this call.
*/
1197,21 → 1213,12
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
 
i915_gem_retire_requests_ring(ring);
 
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*/
if (obj->last_write_seqno &&
i915_seqno_passed(seqno, obj->last_write_seqno)) {
obj->last_write_seqno = 0;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
return i915_gem_object_wait_rendering__tail(obj, ring);
}
 
return ret;
}
 
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
1368,11 → 1375,7
if (!obj->fault_mappable)
return;
 
if (obj->base.dev->dev_mapping)
// unmap_mapping_range(obj->base.dev->dev_mapping,
// (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
// obj->base.size, 1);
 
// drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
obj->fault_mappable = false;
}
 
1485,7 → 1488,7
goto out;
}
/* Now bind it into the GTT if needed */
ret = i915_gem_object_pin(obj, 0, true, false);
ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
if (ret)
goto out;
 
1499,7 → 1502,7
 
obj->fault_mappable = true;
 
pfn = dev_priv->gtt.mappable_base + obj->gtt_offset;
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
 
/* Finally, remap it using the new GTT offset */
 
1517,7 → 1520,7
i915_gem_object_unpin(obj);
 
 
*offset = (u64)mem;
*offset = mem;
 
out:
drm_gem_object_unreference(&obj->base);
1592,7 → 1595,7
* hope for the best.
*/
WARN_ON(ret != -EIO);
i915_gem_clflush_object(obj);
i915_gem_clflush_object(obj, true);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
 
1620,15 → 1623,15
if (obj->pages == NULL)
return 0;
 
BUG_ON(obj->gtt_space);
 
if (obj->pages_pin_count)
return -EBUSY;
 
BUG_ON(i915_gem_obj_bound_any(obj));
 
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
list_del(&obj->gtt_list);
list_del(&obj->global_list);
 
ops->put_pages(obj);
obj->pages = NULL;
1671,7 → 1674,6
 
page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
sg_free_table(st);
kfree(st);
FAIL();
return -ENOMEM;
1741,7 → 1743,7
if (ret)
return ret;
 
list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
return 0;
}
 
1754,6 → 1756,10
u32 seqno = intel_ring_get_seqno(ring);
 
BUG_ON(ring == NULL);
if (obj->ring != ring && obj->last_write_seqno) {
/* Keep the seqno relative to the current ring */
obj->last_write_seqno = seqno;
}
obj->ring = ring;
 
/* Add a reference if we're newly entering the active list. */
1762,8 → 1768,6
obj->active = 1;
}
 
/* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
 
obj->last_read_seqno = seqno;
1785,13 → 1789,14
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
 
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
 
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
 
list_del_init(&obj->ring_list);
obj->ring = NULL;
1879,17 → 1884,18
return 0;
}
 
int
i915_add_request(struct intel_ring_buffer *ring,
int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position;
u32 request_ring_position, request_start;
int was_empty;
int ret;
 
request_start = intel_ring_get_tail(ring);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
1921,7 → 1927,21
 
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->head = request_start;
request->tail = request_ring_position;
request->ctx = ring->last_context;
request->batch_obj = obj;
 
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
* request is retired will the batch_obj be moved onto the
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
 
if (request->ctx)
i915_gem_context_reference(request->ctx);
 
request->emitted_jiffies = GetTimerTicks();
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
1940,12 → 1960,9
trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
 
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
// mod_timer(&dev_priv->hangcheck_timer,
// jiffies +
// msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
if (!dev_priv->ums.mm_suspended) {
// i915_queue_hangcheck(ring->dev);
 
if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
1975,9 → 1992,130
spin_unlock(&file_priv->mm.lock);
}
 
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
if (acthd >= i915_gem_obj_offset(obj, vm) &&
acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
return true;
 
return false;
}
 
static bool i915_head_inside_request(const u32 acthd_unmasked,
const u32 request_start,
const u32 request_end)
{
const u32 acthd = acthd_unmasked & HEAD_ADDR;
 
if (request_start < request_end) {
if (acthd >= request_start && acthd < request_end)
return true;
} else if (request_start > request_end) {
if (acthd >= request_start || acthd < request_end)
return true;
}
 
return false;
}
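/* The second branch above handles requests whose ringbuffer span wraps past
 * the end of the ring, i.e. request_start > request_end. */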
 
static struct i915_address_space *
request_to_vm(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
struct i915_address_space *vm;
 
vm = &dev_priv->gtt.base;
 
return vm;
}
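/* Note: per-context address spaces are not wired up yet, so every request
 * currently resolves to the global GTT (dev_priv->gtt.base). */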
 
static bool i915_request_guilty(struct drm_i915_gem_request *request,
const u32 acthd, bool *inside)
{
/* There is a possibility that the unmasked head address, while
* pointing inside the ring, also matches the batch_obj address range.
* However, this is extremely unlikely.
*/
if (request->batch_obj) {
if (i915_head_inside_object(acthd, request->batch_obj,
request_to_vm(request))) {
*inside = true;
return true;
}
}
 
if (i915_head_inside_request(acthd, request->head, request->tail)) {
*inside = false;
return true;
}
 
return false;
}
 
static void i915_set_reset_status(struct intel_ring_buffer *ring,
struct drm_i915_gem_request *request,
u32 acthd)
{
struct i915_ctx_hang_stats *hs = NULL;
bool inside, guilty;
unsigned long offset = 0;
 
/* Innocent until proven guilty */
guilty = false;
 
if (request->batch_obj)
offset = i915_gem_obj_offset(request->batch_obj,
request_to_vm(request));
 
if (ring->hangcheck.action != HANGCHECK_WAIT &&
i915_request_guilty(request, acthd, &inside)) {
DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
offset,
request->ctx ? request->ctx->id : 0,
acthd);
 
guilty = true;
}
 
/* If contexts are disabled or this is the default context, use
* file_priv->hang_stats
*/
if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
hs = &request->ctx->hang_stats;
else if (request->file_priv)
hs = &request->file_priv->hang_stats;
 
if (hs) {
if (guilty)
hs->batch_active++;
else
hs->batch_pending++;
}
}
 
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
list_del(&request->list);
i915_gem_request_remove_from_client(request);
 
if (request->ctx)
i915_gem_context_unreference(request->ctx);
 
kfree(request);
}
 
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
u32 completed_seqno;
u32 acthd;
 
acthd = intel_ring_get_active_head(ring);
completed_seqno = ring->get_seqno(ring, false);
 
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
 
1985,9 → 2123,10
struct drm_i915_gem_request,
list);
 
list_del(&request->list);
i915_gem_request_remove_from_client(request);
kfree(request);
if (request->seqno > completed_seqno)
i915_set_reset_status(ring, request, acthd);
 
i915_gem_free_request(request);
}
 
while (!list_empty(&ring->active_list)) {
2008,14 → 2147,23
 
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
i915_gem_write_fence(dev, i, reg->obj);
 
/*
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
if (reg->obj) {
i915_gem_object_update_fence(reg->obj, reg,
reg->obj->tiling_mode);
} else {
i915_gem_write_fence(dev, i, NULL);
}
}
}
 
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
int i;
 
2022,16 → 2170,6
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
 
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
list_for_each_entry(obj,
&dev_priv->mm.inactive_list,
mm_list)
{
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
 
i915_gem_restore_fences(dev);
}
 
2068,9 → 2206,7
*/
ring->last_retired_head = request->tail;
 
list_del(&request->list);
i915_gem_request_remove_from_client(request);
kfree(request);
i915_gem_free_request(request);
}
 
/* Move any buffers on the active list that are no longer referenced
2137,12 → 2273,12
idle = true;
for_each_ring(ring, dev_priv, i) {
if (ring->gpu_caches_dirty)
i915_add_request(ring, NULL, NULL);
i915_add_request(ring, NULL);
 
idle &= list_empty(&ring->request_list);
}
 
if (!dev_priv->mm.suspended && !idle)
if (!dev_priv->ums.mm_suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
if (idle)
2277,12 → 2413,9
old_write_domain);
}
 
/**
* Unbinds an object from the GTT aperture.
*/
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
int i915_vma_unbind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret;
 
2289,9 → 2422,12
if(obj == get_fb_obj())
return 0;
 
if (obj->gtt_space == NULL)
if (list_empty(&vma->vma_link))
return 0;
 
if (!drm_mm_node_allocated(&vma->node))
goto destroy;
 
if (obj->pin_count)
return -EBUSY;
 
2312,7 → 2448,7
if (ret)
return ret;
 
trace_i915_gem_object_unbind(obj);
trace_i915_vma_unbind(vma);
 
if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj);
2321,19 → 2457,48
obj->has_aliasing_ppgtt_mapping = 0;
}
i915_gem_gtt_finish_object(obj);
i915_gem_object_unpin_pages(obj);
 
list_del(&obj->mm_list);
list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
list_del(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
obj->map_and_fenceable = true;
 
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
obj->gtt_offset = 0;
drm_mm_remove_node(&vma->node);
 
destroy:
i915_gem_vma_destroy(vma);
 
/* Since the unbound list is global, only move to that list if
* no more VMAs exist.
* NB: Until we have real VMAs there will only ever be one */
WARN_ON(!list_empty(&obj->vma_list));
if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
return 0;
}
 
/**
* Unbinds an object from the global GTT aperture.
*/
int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
 
if (!i915_gem_obj_ggtt_bound(obj))
return 0;
 
if (obj->pin_count)
return -EBUSY;
 
BUG_ON(obj->pages == NULL);
 
return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
}
 
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
2360,7 → 2525,6
drm_i915_private_t *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
uint64_t val;
 
if (INTEL_INFO(dev)->gen >= 6) {
fence_reg = FENCE_REG_SANDYBRIDGE_0;
2370,23 → 2534,42
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
 
fence_reg += reg * 8;
 
/* To w/a incoherency with non-atomic 64-bit register updates,
* we split the 64-bit update into two 32-bit writes. In order
* for a partial fence not to be evaluated between writes, we
* precede the update with a write to turn off the fence register,
* and only enable the fence as the last step.
*
* For extra levels of paranoia, we make sure each step lands
* before applying the next step.
*/
I915_WRITE(fence_reg, 0);
POSTING_READ(fence_reg);
 
if (obj) {
u32 size = obj->gtt_space->size;
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
 
val = (uint64_t)((obj->gtt_offset + size - 4096) &
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
} else
val = 0;
 
fence_reg += reg * 8;
I915_WRITE64(fence_reg, val);
I915_WRITE(fence_reg + 4, val >> 32);
POSTING_READ(fence_reg + 4);
 
I915_WRITE(fence_reg + 0, val);
POSTING_READ(fence_reg);
} else {
I915_WRITE(fence_reg + 4, 0);
POSTING_READ(fence_reg + 4);
}
}
 
static void i915_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
2395,15 → 2578,15
u32 val;
 
if (obj) {
u32 size = obj->gtt_space->size;
u32 size = i915_gem_obj_ggtt_size(obj);
int pitch_val;
int tile_width;
 
WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
(obj->gtt_offset & (size - 1)),
"object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
obj->gtt_offset, obj->map_and_fenceable, size);
(i915_gem_obj_ggtt_offset(obj) & (size - 1)),
"object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
 
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
2414,7 → 2597,7
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
 
val = obj->gtt_offset;
val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
2439,19 → 2622,19
uint32_t val;
 
if (obj) {
u32 size = obj->gtt_space->size;
u32 size = i915_gem_obj_ggtt_size(obj);
uint32_t pitch_val;
 
WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
(obj->gtt_offset & (size - 1)),
"object 0x%08x not 512K or pot-size 0x%08x aligned\n",
obj->gtt_offset, size);
(i915_gem_obj_ggtt_offset(obj) & (size - 1)),
"object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
i915_gem_obj_ggtt_offset(obj), size);
 
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
 
val = obj->gtt_offset;
val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
2480,6 → 2663,10
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();
 
WARN(obj && (!obj->stride || !obj->tiling_mode),
"bogus fence setup with stride: 0x%x, tiling mode: %i\n",
obj->stride, obj->tiling_mode);
 
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
2503,36 → 2690,17
return fence - dev_priv->fence_regs;
}
 
static void i915_gem_write_fence__ipi(void *data)
{
asm volatile("wbinvd");
 
}
 
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg = fence_number(dev_priv, fence);
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int reg = fence_number(dev_priv, fence);
 
/* In order to fully serialize access to the fenced region and
* the update to the fence register we need to take extreme
* measures on SNB+. In theory, the write to the fence register
* flushes all memory transactions before, and coupled with the
* mb() placed around the register write we serialise all memory
* operations with respect to the changes in the tiler. Yet, on
* SNB+ we need to take a step further and emit an explicit wbinvd()
* on each processor in order to manually flush all memory
* transactions before updating the fence register.
*/
if (HAS_LLC(obj->base.dev))
on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
if (enable) {
obj->fence_reg = fence_reg;
obj->fence_reg = reg;
fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else {
2540,6 → 2708,7
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
obj->fence_dirty = false;
}
 
static int
2669,7 → 2838,6
return 0;
 
i915_gem_object_update_fence(obj, reg, enable);
obj->fence_dirty = false;
 
return 0;
}
2687,7 → 2855,7
if (HAS_LLC(dev))
return true;
 
if (gtt_space == NULL)
if (!drm_mm_node_allocated(gtt_space))
return true;
 
if (list_empty(&gtt_space->node_list))
2711,7 → 2879,7
struct drm_i915_gem_object *obj;
int err = 0;
 
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
if (obj->gtt_space == NULL) {
printk(KERN_ERR "object found on GTT list with no space reserved\n");
err++;
2720,8 → 2888,8
 
if (obj->cache_level != obj->gtt_space->color) {
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
obj->gtt_space->start,
obj->gtt_space->start + obj->gtt_space->size,
i915_gem_obj_ggtt_offset(obj),
i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level,
obj->gtt_space->color);
err++;
2732,8 → 2900,8
obj->gtt_space,
obj->cache_level)) {
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
obj->gtt_space->start,
obj->gtt_space->start + obj->gtt_space->size,
i915_gem_obj_ggtt_offset(obj),
i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level);
err++;
continue;
2748,7 → 2916,8
* Finds free space in the GTT aperture and binds the object there.
*/
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking)
2755,9 → 2924,10
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
size_t gtt_max =
map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
 
fence_size = i915_gem_get_gtt_size(dev,
2784,10 → 2954,11
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size >
(map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
FAIL();
if (obj->base.size > gtt_max) {
DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
obj->base.size,
map_and_fenceable ? "mappable" : "total",
gtt_max);
return -E2BIG;
}
 
2797,62 → 2968,69
 
i915_gem_object_pin_pages(obj);
 
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (node == NULL) {
i915_gem_object_unpin_pages(obj);
return -ENOMEM;
BUG_ON(!i915_is_ggtt(vm));
 
vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unpin;
}
 
/* For now we only ever use 1 vma per object */
WARN_ON(!list_is_singular(&obj->vma_list));
 
search_free:
if (map_and_fenceable)
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
0, dev_priv->gtt.mappable_end);
else
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level);
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max,
DRM_MM_SEARCH_DEFAULT);
if (ret) {
 
i915_gem_object_unpin_pages(obj);
kfree(node);
return ret;
goto err_free_vma;
}
if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
i915_gem_object_unpin_pages(obj);
drm_mm_put_block(node);
return -EINVAL;
if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
obj->cache_level))) {
ret = -EINVAL;
goto err_remove_node;
}
 
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
i915_gem_object_unpin_pages(obj);
drm_mm_put_block(node);
return ret;
}
if (ret)
goto err_remove_node;
 
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
 
obj->gtt_space = node;
obj->gtt_offset = node->start;
if (i915_is_ggtt(vm)) {
bool mappable, fenceable;
 
fenceable =
node->size == fence_size &&
(node->start & (fence_alignment - 1)) == 0;
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
 
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
mappable = (vma->node.start + obj->base.size <=
dev_priv->gtt.mappable_end);
 
obj->map_and_fenceable = mappable && fenceable;
}
 
i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
 
trace_i915_vma_bind(vma, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
 
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
i915_gem_vma_destroy(vma);
err_unpin:
i915_gem_object_unpin_pages(obj);
return ret;
}
 
void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
bool force)
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
2859,7 → 3037,7
* again at bind time.
*/
if (obj->pages == NULL)
return;
return false;
 
/*
* Stolen memory is always coherent with the GPU as it is explicitly
2866,7 → 3044,7
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen)
return;
return false;
 
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
2876,8 → 3054,8
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
if (obj->cache_level != I915_CACHE_NONE)
return;
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
return false;
#if 0
if(obj->mapped != NULL)
{
2927,6 → 3105,7
}
#endif
 
return true;
}
 
/** Flushes the GTT write domain for the object if it's dirty. */
2958,7 → 3137,8
 
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
bool force)
{
uint32_t old_write_domain;
 
2965,8 → 3145,9
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
 
i915_gem_clflush_object(obj);
if (i915_gem_clflush_object(obj, force))
i915_gem_chipset_flush(obj->base.dev);
 
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
 
2989,7 → 3170,7
int ret;
 
/* Not valid to be called on unbound objects. */
if (obj->gtt_space == NULL)
if (!i915_gem_obj_bound_any(obj))
return -EINVAL;
 
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2999,7 → 3180,7
if (ret)
return ret;
 
i915_gem_object_flush_cpu_write_domain(obj);
i915_gem_object_flush_cpu_write_domain(obj, false);
 
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
3027,9 → 3208,15
old_write_domain);
 
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj))
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
if (i915_gem_object_is_inactive(obj)) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj,
&dev_priv->gtt.base);
if (vma)
list_move_tail(&vma->mm_list,
&dev_priv->gtt.base.inactive_list);
 
}
 
return 0;
}
 
3038,6 → 3225,7
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma;
int ret;
 
if (obj->cache_level == cache_level)
3048,13 → 3236,17
return -EBUSY;
}
 
if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
ret = i915_gem_object_unbind(obj);
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_vma_unbind(vma);
if (ret)
return ret;
 
break;
}
}
 
if (obj->gtt_space) {
if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
3076,11 → 3268,13
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
 
obj->gtt_space->color = cache_level;
}
 
if (cache_level == I915_CACHE_NONE) {
list_for_each_entry(vma, &obj->vma_list, vma_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
 
if (cpu_write_needs_clflush(obj)) {
u32 old_read_domains, old_write_domain;
 
/* If we're coming from LLC cached, then we haven't
3090,7 → 3284,6
* Just set it to the CPU cache for now.
*/
WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
 
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
3103,7 → 3296,6
old_write_domain);
}
 
obj->cache_level = cache_level;
i915_gem_verify_gtt(dev);
return 0;
}
3131,8 → 3323,21
goto unlock;
}
 
args->caching = obj->cache_level != I915_CACHE_NONE;
switch (obj->cache_level) {
case I915_CACHE_LLC:
case I915_CACHE_L3_LLC:
args->caching = I915_CACHING_CACHED;
break;
 
case I915_CACHE_WT:
args->caching = I915_CACHING_DISPLAY;
break;
 
default:
args->caching = I915_CACHING_NONE;
break;
}
 
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
3160,6 → 3365,9
case I915_CACHING_CACHED:
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
break;
default:
return -EINVAL;
}
3182,6 → 3390,22
return ret;
}
 
static bool is_pin_display(struct drm_i915_gem_object *obj)
{
/* There are 3 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;
* 3. The user.
*
* We can ignore reservations as we hold the struct_mutex and
* are only called outside of the reservation path. The user
* can only increment pin_count once, and so if after
* subtracting the potential reference by the user, any pin_count
* remains, it must be due to another use by the display engine.
*/
return obj->pin_count - !!obj->user_pin_count;
}
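/* Worked example of the accounting above (values assumed for
 * illustration): a scanout buffer that the user has also pinned has
 * pin_count == 2 and user_pin_count >= 1, so 2 - !!1 == 1 and the
 * object still counts as pinned for display; a buffer pinned only
 * through the pin ioctl gives 1 - 1 == 0.
 */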
 
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
3201,6 → 3425,11
return ret;
}
 
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
obj->pin_display = true;
 
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
* done with uncached PTEs. This is lowest common denominator for all
3210,19 → 3439,20
* of uncaching, which would allow us to flush all the LLC-cached data
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
ret = i915_gem_object_set_cache_level(obj,
HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
if (ret)
return ret;
goto err_unpin_display;
 
/* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
ret = i915_gem_object_pin(obj, alignment, true, false);
ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
if (ret)
return ret;
goto err_unpin_display;
 
i915_gem_object_flush_cpu_write_domain(obj);
i915_gem_object_flush_cpu_write_domain(obj, true);
 
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
3238,8 → 3468,19
old_write_domain);
 
return 0;
 
err_unpin_display:
obj->pin_display = is_pin_display(obj);
return ret;
}
 
void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin(obj);
obj->pin_display = is_pin_display(obj);
}
 
int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
3283,7 → 3524,7
 
/* Flush the CPU cache if it's still invalid. */
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
i915_gem_clflush_object(obj, false);
 
obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
3361,35 → 3602,42
 
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
{
struct i915_vma *vma;
int ret;
 
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
 
if (obj->gtt_space != NULL) {
if ((alignment && obj->gtt_offset & (alignment - 1)) ||
WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
 
vma = i915_gem_obj_to_vma(obj, vm);
 
if (vma) {
if ((alignment &&
vma->node.start & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
obj->gtt_offset, alignment,
i915_gem_obj_offset(obj, vm), alignment,
map_and_fenceable,
obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj);
ret = i915_vma_unbind(vma);
if (ret)
return ret;
}
}
 
if (obj->gtt_space == NULL) {
if (!i915_gem_obj_bound(obj, vm)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
ret = i915_gem_object_bind_to_gtt(obj, alignment,
ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
map_and_fenceable,
nonblocking);
if (ret)
3412,7 → 3660,7
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pin_count == 0);
BUG_ON(obj->gtt_space == NULL);
BUG_ON(!i915_gem_obj_bound_any(obj));
 
if (--obj->pin_count == 0)
obj->pin_mappable = false;
3456,7 → 3704,7
}
 
if (obj->user_pin_count == 0) {
ret = i915_gem_object_pin(obj, args->alignment, true, false);
ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
3464,11 → 3712,7
obj->user_pin_count++;
obj->pin_filp = file;
 
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj->gtt_offset;
args->offset = i915_gem_obj_ggtt_offset(obj);
out:
drm_gem_object_unreference(&obj->base);
unlock:
3620,10 → 3864,11
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
INIT_LIST_HEAD(&obj->mm_list);
INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
 
obj->ops = ops;
 
3648,8 → 3893,6
gfp_t mask;
 
obj = i915_gem_object_alloc(dev);
 
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
{
FAIL();
3657,8 → 3900,7
};
 
if (drm_gem_object_init(dev, &obj->base, size) != 0) {
kfree(obj);
FAIL();
i915_gem_object_free(obj);
return NULL;
}
 
3700,33 → 3942,42
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma, *next;
 
trace_i915_gem_object_destroy(obj);
 
// if (obj->phys_obj)
// i915_gem_detach_phys_object(dev, obj);
// printf("%s obj %p\n", __FUNCTION__, obj);
 
obj->pin_count = 0;
if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
/* NB: 0 or 1 elements */
WARN_ON(!list_empty(&obj->vma_list) &&
!list_is_singular(&obj->vma_list));
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
int ret = i915_vma_unbind(vma);
if (WARN_ON(ret == -ERESTARTSYS)) {
bool was_interruptible;
 
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
 
WARN_ON(i915_gem_object_unbind(obj));
WARN_ON(i915_vma_unbind(vma));
 
dev_priv->mm.interruptible = was_interruptible;
}
}
 
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
 
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
i915_gem_object_put_pages(obj);
// i915_gem_object_free_mmap_offset(obj);
i915_gem_object_release_stolen(obj);
 
BUG_ON(obj->pages);
 
// if (obj->base.import_attach)
// drm_prime_gem_destroy(&obj->base, NULL);
 
if(obj->base.filp != NULL)
{
3738,9 → 3989,38
i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
kfree(obj->bit_17);
kfree(obj);
i915_gem_object_free(obj);
}
 
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
 
INIT_LIST_HEAD(&vma->vma_link);
INIT_LIST_HEAD(&vma->mm_list);
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
 
/* Keep GGTT vmas first to make debug easier */
if (i915_is_ggtt(vm))
list_add(&vma->vma_link, &obj->vma_list);
else
list_add_tail(&vma->vma_link, &obj->vma_list);
 
return vma;
}
 
void i915_gem_vma_destroy(struct i915_vma *vma)
{
WARN_ON(vma->node.allocated);
list_del(&vma->vma_link);
kfree(vma);
}
 
#if 0
int
i915_gem_idle(struct drm_device *dev)
3748,9 → 4028,7
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
 
mutex_lock(&dev->struct_mutex);
 
if (dev_priv->mm.suspended) {
if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
3766,18 → 4044,11
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
 
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
 
mutex_unlock(&dev->struct_mutex);
 
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
3877,12 → 4148,21
goto cleanup_bsd_ring;
}
 
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (HAS_VEBOX(dev)) {
ret = intel_init_vebox_ring_buffer(dev);
if (ret)
goto cleanup_blt_ring;
}
 
 
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
goto cleanup_vebox_ring;
 
return 0;
 
cleanup_vebox_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
3902,8 → 4182,8
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
 
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
3981,7 → 4261,7
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
3993,7 → 4273,7
}
 
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
dev_priv->ums.mm_suspended = 0;
 
ret = i915_gem_init_hw(dev);
if (ret != 0) {
4001,7 → 4281,7
return ret;
}
 
BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
mutex_unlock(&dev->struct_mutex);
 
ret = drm_irq_install(dev);
4013,7 → 4293,7
cleanup_ringbuffer:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
dev_priv->mm.suspended = 1;
dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
 
return ret;
4023,11 → 4303,26
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
 
drm_irq_uninstall(dev);
return i915_gem_idle(dev);
 
mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);
 
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound ums.mm_suspended!
*/
if (ret != 0)
dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
void
4038,9 → 4333,11
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
 
mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
mutex_unlock(&dev->struct_mutex);
}
#endif
 
4051,6 → 4348,16
INIT_LIST_HEAD(&ring->request_list);
}
 
static void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm)
{
vm->dev = dev_priv->dev;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
INIT_LIST_HEAD(&vm->global_link);
list_add(&vm->global_link, &dev_priv->vm_list);
}
 
void
i915_gem_load(struct drm_device *dev)
{
4057,8 → 4364,9
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
 
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base);
 
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4093,9 → 4401,308
 
dev_priv->mm.interruptible = true;
 
// dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
// dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
// register_shrinker(&dev_priv->mm.inactive_shrinker);
}
 
#if 0
/*
* Create a physically contiguous memory object for this object
* e.g. for cursor + overlay regs
*/
static int i915_gem_init_phys_object(struct drm_device *dev,
int id, int size, int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
int ret;
 
if (dev_priv->mm.phys_objs[id - 1] || !size)
return 0;
 
phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
if (!phys_obj)
return -ENOMEM;
 
phys_obj->id = id;
 
phys_obj->handle = drm_pci_alloc(dev, size, align);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
}
#ifdef CONFIG_X86
set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
 
dev_priv->mm.phys_objs[id - 1] = phys_obj;
 
return 0;
kfree_obj:
kfree(phys_obj);
return ret;
}
 
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
 
if (!dev_priv->mm.phys_objs[id - 1])
return;
 
phys_obj = dev_priv->mm.phys_objs[id - 1];
if (phys_obj->cur_obj) {
i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
}
 
#ifdef CONFIG_X86
set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
drm_pci_free(dev, phys_obj->handle);
kfree(phys_obj);
dev_priv->mm.phys_objs[id - 1] = NULL;
}
 
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
int i;
 
for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
i915_gem_free_phys_object(dev, i);
}
 
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj)
{
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
char *vaddr;
int i;
int page_count;
 
if (!obj->phys_obj)
return;
vaddr = obj->phys_obj->handle->vaddr;
 
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
kunmap_atomic(dst);
 
drm_clflush_pages(&page, 1);
 
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
}
}
i915_gem_chipset_flush(dev);
 
obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
}
 
int
i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int id,
int align)
{
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = 0;
int page_count;
int i;
 
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
 
if (obj->phys_obj) {
if (obj->phys_obj->id == id)
return 0;
i915_gem_detach_phys_object(dev, obj);
}
 
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
obj->base.size, align);
if (ret) {
DRM_ERROR("failed to init phys object %d size: %zu\n",
id, obj->base.size);
return ret;
}
}
 
/* bind to the object */
obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj->phys_obj->cur_obj = obj;
 
page_count = obj->base.size / PAGE_SIZE;
 
for (i = 0; i < page_count; i++) {
struct page *page;
char *dst, *src;
 
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);
 
src = kmap_atomic(page);
dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);
 
mark_page_accessed(page);
page_cache_release(page);
}
 
return 0;
}
 
static int
i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
 
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
 
/* The physical object once assigned is fixed for the lifetime
* of the obj, so we can safely drop the lock and continue
* to access vaddr.
*/
mutex_unlock(&dev->struct_mutex);
unwritten = copy_from_user(vaddr, user_data, args->size);
mutex_lock(&dev->struct_mutex);
if (unwritten)
return -EFAULT;
}
 
i915_gem_chipset_flush(dev);
return 0;
}
 
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
 
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
spin_lock(&file_priv->mm.lock);
while (!list_empty(&file_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
 
request = list_first_entry(&file_priv->mm.request_list,
struct drm_i915_gem_request,
client_list);
list_del(&request->client_list);
request->file_priv = NULL;
}
spin_unlock(&file_priv->mm.lock);
}
#endif
 
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
return false;
 
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
return false;
#endif
}
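/* A minimal usage sketch of the helper above (hypothetical caller;
 * assumes the usual mutex_trylock()/current helpers): it lets a path
 * that may already hold struct_mutex tell "locked by us" apart from
 * "locked by someone else".
 *
 *	if (!mutex_trylock(&dev->struct_mutex)) {
 *		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 *			return 0;	// held elsewhere, back off
 *		unlock = false;		// already ours, skip the unlock
 *	}
 */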
 
/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
 
if (vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
 
BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
return vma->node.start;
 
}
return 0; //-1;
}
 
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
return true;
 
return false;
}
 
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_address_space *vm;
 
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
if (i915_gem_obj_bound(o, vm))
return true;
 
return false;
}
 
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
 
if (vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
 
BUG_ON(list_empty(&o->vma_list));
 
list_for_each_entry(vma, &o->vma_list, vma_link)
if (vma->vm == vm)
return vma->node.size;
 
return 0;
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm)
return vma;
 
return NULL;
}
 
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
 
vma = i915_gem_obj_to_vma(obj, vm);
if (!vma)
vma = i915_gem_vma_create(obj, vm);
 
return vma;
}
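/* A minimal usage sketch of the lookup-or-create helper above; the
 * function name and caller are hypothetical, and the real binding path
 * additionally allocates vma->node before the vma is used.
 */
static inline int example_obj_get_vma(struct drm_i915_gem_object *obj,
				      struct i915_address_space *vm,
				      struct i915_vma **out)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
	if (IS_ERR(vma))
		return PTR_ERR(vma);	/* -ENOMEM from i915_gem_vma_create() */

	*out = vma;
	return 0;
}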
/drivers/video/drm/i915/i915_gem_context.c
113,7 → 113,7
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev))
ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
124,10 → 124,10
return ret;
}
 
static void do_destroy(struct i915_hw_context *ctx)
void i915_gem_context_free(struct kref *ctx_ref)
{
if (ctx->file_priv)
idr_remove(&ctx->file_priv->context_idr, ctx->id);
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
 
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
145,6 → 145,7
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
 
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
if (ctx->obj == NULL) {
kfree(ctx);
154,8 → 155,9
 
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
I915_CACHE_LLC_MLC);
if (ret)
I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret))
goto err_out;
}
 
169,18 → 171,18
if (file_priv == NULL)
return ctx;
 
ctx->file_priv = file_priv;
 
ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
GFP_KERNEL);
if (ret < 0)
goto err_out;
 
ctx->file_priv = file_priv;
ctx->id = ret;
 
return ctx;
 
err_out:
do_destroy(ctx);
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
 
212,13 → 214,17
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret)
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
}
 
ret = do_switch(ctx);
if (ret)
if (ret) {
DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
goto err_unpin;
}
 
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
226,7 → 232,7
err_unpin:
i915_gem_object_unpin(ctx->obj);
err_destroy:
do_destroy(ctx);
i915_gem_context_unreference(ctx);
return ret;
}
 
236,6 → 242,7
 
if (!HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
return;
}
 
248,11 → 255,13
 
if (dev_priv->hw_context_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
return;
}
 
if (create_default_context(dev_priv)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
return;
}
 
262,6 → 271,7
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 
if (dev_priv->hw_contexts_disabled)
return;
269,11 → 279,16
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
// intel_gpu_reset(dev);
intel_gpu_reset(dev);
 
i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
i915_gem_object_unpin(dctx->obj);
 
do_destroy(dev_priv->ring[RCS].default_context);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
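	/* A sketch of the balancing calls the comment above describes
	 * (whether this port performs them at this point is an assumption):
	 *
	 *	drm_gem_object_unreference(&dctx->obj->base);
	 *	i915_gem_context_unreference(dctx);
	 */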
}
 
static int context_idr_cleanup(int id, void *p, void *data)
282,8 → 297,8
 
BUG_ON(id == DEFAULT_CONTEXT_ID);
 
do_destroy(ctx);
 
 
return 0;
}
 
325,6 → 340,7
if (ret)
return ret;
 
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
if (IS_GEN7(ring->dev))
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
332,7 → 348,7
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, new_context->obj->gtt_offset |
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
353,16 → 369,16
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = to->ring;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
 
BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
if (from_obj == to->obj)
if (from == to)
return 0;
 
ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
 
382,7 → 398,7
 
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
else if (WARN_ON_ONCE(from == to)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
 
ret = mi_set_context(ring, to, hw_flags);
397,9 → 413,12
* is a bit suboptimal because the retiring can occur simply after the
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring);
if (from != NULL) {
struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
i915_gem_object_move_to_active(from->obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
407,15 → 426,26
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
from_obj->dirty = 1;
BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj);
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
 
drm_gem_object_unreference(&from_obj->base);
ret = i915_add_request(ring, NULL);
if (ret) {
/* Too late, we've already scheduled a context switch.
* Try to undo the change so that the hw state is
* consistent with our tracking. In case of emergency,
* scream.
*/
WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
return ret;
}
 
drm_gem_object_reference(&to->obj->base);
ring->last_context_obj = to->obj;
i915_gem_object_unpin(from->obj);
i915_gem_context_unreference(from);
}
 
i915_gem_context_reference(to);
ring->last_context = to;
to->is_initialized = true;
 
return 0;
444,6 → 474,8
if (dev_priv->hw_contexts_disabled)
return 0;
 
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
if (ring != &dev_priv->ring[RCS])
return 0;
 
513,7 → 545,6
return -ENOENT;
}
 
do_destroy(ctx);
 
mutex_unlock(&dev->struct_mutex);
 
/drivers/video/drm/i915/i915_gem_execbuffer.c
35,16 → 35,12
 
#define I915_EXEC_SECURE (1<<9)
#define I915_EXEC_IS_PINNED (1<<10)
#define I915_EXEC_VEBOX (4<<0)
 
#define wmb() asm volatile ("sfence")
 
struct drm_i915_gem_object *get_fb_obj();
 
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
 
static unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
204,7 → 200,8
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *reloc)
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
218,7 → 215,7
return -ENOENT;
 
target_i915_obj = to_intel_bo(target_obj);
target_offset = target_i915_obj->gtt_offset;
target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
308,7 → 305,7
return ret;
 
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
reloc->offset += i915_gem_obj_ggtt_offset(obj);
reloc_page = (void*)MapIoMem(reloc->offset & PAGE_MASK, 4096, 3);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
324,7 → 321,8
 
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
struct eb_objects *eb,
struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
347,7 → 345,8
do {
u64 offset = r->presumed_offset;
 
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
vm);
if (ret)
return ret;
 
367,13 → 366,15
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs)
struct drm_i915_gem_relocation_entry *relocs,
struct i915_address_space *vm)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
 
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
vm);
if (ret)
return ret;
}
382,7 → 383,8
}
 
static int
i915_gem_execbuffer_relocate(struct eb_objects *eb)
i915_gem_execbuffer_relocate(struct eb_objects *eb,
struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int ret = 0;
396,7 → 398,7
*/
// pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
if (ret)
break;
}
418,6 → 420,7
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct i915_address_space *vm,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
426,8 → 429,6
bool need_fence, need_mappable;
int ret;
 
// ENTER();
 
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
434,12 → 435,10
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
 
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
false);
if (ret)
{
FAIL();
return ret;
};
 
entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
447,10 → 446,7
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj);
if (ret)
{
FAIL();
return ret;
};
 
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
467,8 → 463,8
obj->has_aliasing_ppgtt_mapping = 1;
}
 
if (entry->offset != obj->gtt_offset) {
entry->offset = obj->gtt_offset;
if (entry->offset != i915_gem_obj_offset(obj, vm)) {
entry->offset = i915_gem_obj_offset(obj, vm);
*need_reloc = true;
}
 
489,7 → 485,7
{
struct drm_i915_gem_exec_object2 *entry;
 
if (!obj->gtt_space)
if (!i915_gem_obj_bound_any(obj))
return;
 
entry = obj->exec_entry;
506,6 → 502,7
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
struct i915_address_space *vm,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
513,8 → 510,6
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
 
// ENTER();
 
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
562,10 → 557,12
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
u32 obj_offset;
 
if (!obj->gtt_space)
if (!i915_gem_obj_bound(obj, vm))
continue;
 
obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
572,11 → 569,13
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
 
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
 
if ((entry->alignment &&
obj_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
else
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
583,10 → 582,10
 
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (obj->gtt_space)
if (i915_gem_obj_bound(obj, vm))
continue;
 
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
596,10 → 595,7
i915_gem_execbuffer_unreserve_object(obj);
 
if (ret != -ENOSPC || retry++)
{
// LEAVE();
return ret;
};
 
// ret = i915_gem_evict_everything(ring->dev);
if (ret)
613,7 → 609,8
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec)
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
697,7 → 694,7
goto err;
 
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
 
704,7 → 701,8
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
reloc + reloc_offset[offset],
vm);
if (ret)
goto err;
}
727,6 → 725,7
{
struct drm_i915_gem_object *obj;
uint32_t flush_domains = 0;
bool flush_chipset = false;
int ret;
 
list_for_each_entry(obj, objects, exec_list) {
735,12 → 734,12
return ret;
 
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
flush_chipset |= i915_gem_clflush_object(obj, false);
 
flush_domains |= obj->base.write_domain;
}
 
if (flush_domains & I915_GEM_DOMAIN_CPU)
if (flush_chipset)
i915_gem_chipset_flush(ring->dev);
 
if (flush_domains & I915_GEM_DOMAIN_GTT)
803,6 → 802,7
 
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
817,12 → 817,14
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
/* FIXME: This lookup gets fixed later <-- danvet */
list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
intel_mark_fb_busy(obj);
intel_mark_fb_busy(obj, ring);
}
 
trace_i915_gem_object_change_domain(obj, old_read, old_write);
832,13 → 834,14
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
struct intel_ring_buffer *ring,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
 
/* Add a breadcrumb for the completion of the batch buffer */
(void)i915_add_request(ring, file, NULL);
(void)__i915_add_request(ring, file, obj, NULL);
}
 
static int
870,7 → 873,8
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
884,17 → 888,11
bool need_relocs;
 
if (!i915_gem_check_execbuffer(args))
{
FAIL();
return -EINVAL;
}
 
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
{
FAIL();
return ret;
};
 
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
911,21 → 909,29
break;
case I915_EXEC_BSD:
ring = &dev_priv->ring[VCS];
if (ctx_id != 0) {
if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
FAIL();
return -EPERM;
}
break;
case I915_EXEC_BLT:
ring = &dev_priv->ring[BCS];
if (ctx_id != 0) {
if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
}
break;
case I915_EXEC_VEBOX:
ring = &dev_priv->ring[VECS];
if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
}
break;
 
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
1003,7 → 1009,7
if (ret)
goto pre_mutex_err;
 
if (dev_priv->mm.suspended) {
if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
1028,17 → 1034,17
 
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
 
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
ret = i915_gem_execbuffer_relocate(eb);
ret = i915_gem_execbuffer_relocate(eb, vm);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec);
eb, exec, vm);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
1089,7 → 1095,8
goto err;
}
 
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_start = i915_gem_obj_offset(batch_obj, vm) +
args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
 
1103,8 → 1110,8
 
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
err:
eb_destroy(eb);
1122,16 → 1129,14
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
 
// ENTER();
 
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
FAIL();
return -EINVAL;
}
 
1140,7 → 1145,6
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
FAIL();
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
1155,7 → 1159,8
return -EFAULT;
}
 
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
&dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
/drivers/video/drm/i915/i915_gem_gtt.c
35,10 → 35,12
#include "i915_trace.h"
#include "intel_drv.h"
 
typedef uint32_t gen6_gtt_pte_t;
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
 
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
 
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
48,11 → 50,21
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
 
static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
dma_addr_t addr,
/* Cacheability Control is a 4-bit value. The low three bits are stored in *
* bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
*/
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
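/* Worked example of the encoding above: HSW_CACHEABILITY_CONTROL(0x2)
 * is ((0x2 & 0x7) << 1) | ((0x2 & 0x8) << 8) == 0x004, while
 * HSW_CACHEABILITY_CONTROL(0xb) == 0x006 | 0x800 == 0x806, i.e. PTE
 * bits 3:1 carry the low three bits and PTE bit 11 carries the fourth.
 */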
 
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
59,39 → 71,103
pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
switch (level) {
case I915_CACHE_LLC_MLC:
/* Haswell doesn't set L3 this way */
if (IS_HASWELL(dev))
case I915_CACHE_L3_LLC:
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
else
pte |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_NONE:
pte |= GEN6_PTE_UNCACHED;
break;
default:
WARN_ON(1);
}
 
return pte;
}
 
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
switch (level) {
case I915_CACHE_L3_LLC:
pte |= GEN7_PTE_CACHE_L3_LLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(dev))
pte |= HSW_PTE_UNCACHED;
else
pte |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
WARN_ON(1);
}
 
return pte;
}
 
static int gen6_ppgtt_enable(struct drm_device *dev)
#define BYT_PTE_WRITEABLE (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
 
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
pte |= BYT_PTE_WRITEABLE;
 
if (level != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
 
return pte;
}
 
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
 
if (level != I915_CACHE_NONE)
pte |= HSW_WB_LLC_AGE3;
 
return pte;
}
 
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
 
switch (level) {
case I915_CACHE_NONE:
break;
case I915_CACHE_WT:
pte |= HSW_WT_ELLC_LLC_AGE0;
break;
default:
pte |= HSW_WB_ELLC_LLC_AGE0;
break;
}
 
return pte;
}
 
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
gen6_gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
 
WARN_ON(ppgtt->pd_offset & 0x3f);
pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
104,7 → 180,20
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
}
 
static int gen6_ppgtt_enable(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
 
BUG_ON(ppgtt->pd_offset & 0x3f);
 
gen6_write_pdes(ppgtt);
 
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
152,18 → 241,18
}
 
/* PPGTT support for Sandybdrige/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
unsigned num_entries)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
 
scratch_pte = gen6_pte_encode(ppgtt->dev,
ppgtt->scratch_page_dma_addr,
I915_CACHE_LLC);
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
 
pt_vaddr = AllocKernelSpace(4096);
 
188,11 → 277,13
FreeKernelSpace(pt_vaddr);
}
 
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
210,8 → 301,7
dma_addr_t page_addr;
 
page_addr = sg_page_iter_dma_address(&sg_iter);
pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
cache_level);
pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
act_pt++;
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
222,13 → 312,17
FreeKernelSpace(pt_vaddr);
}
 
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
int i;
 
drm_mm_takedown(&ppgtt->base.mm);
 
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(ppgtt->dev->pdev,
pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
242,7 → 336,7
 
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->dev;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned first_pd_entry_in_global_pt;
int i;
253,11 → 347,13
* now. */
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
 
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
ppgtt->clear_range = gen6_ppgtt_clear_range;
ppgtt->insert_entries = gen6_ppgtt_insert_entries;
ppgtt->cleanup = gen6_ppgtt_cleanup;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
283,7 → 379,7
ppgtt->pt_dma_addr[i] = pt_addr;
}
 
ppgtt->clear_range(ppgtt, 0,
ppgtt->base.clear_range(&ppgtt->base, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
 
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
317,8 → 413,7
if (!ppgtt)
return -ENOMEM;
 
ppgtt->dev = dev;
ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
ppgtt->base.dev = dev;
 
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
327,8 → 422,11
 
if (ret)
kfree(ppgtt);
else
else {
dev_priv->mm.aliasing_ppgtt = ppgtt;
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
}
 
return ret;
}
341,7 → 439,7
if (!ppgtt)
return;
 
ppgtt->cleanup(ppgtt);
ppgtt->base.cleanup(&ppgtt->base);
dev_priv->mm.aliasing_ppgtt = NULL;
}
 
349,8 → 447,8
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
ppgtt->insert_entries(ppgtt, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
cache_level);
}
 
357,8 → 455,8
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
ppgtt->clear_range(ppgtt,
obj->gtt_space->start >> PAGE_SHIFT,
ppgtt->base.clear_range(&ppgtt->base,
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
 
406,11 → 504,12
struct drm_i915_gem_object *obj;
 
/* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
dev_priv->gtt.total / PAGE_SIZE);
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE);
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
i915_gem_clflush_object(obj, obj->pin_display);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
 
436,12 → 535,12
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_insert_entries(struct drm_device *dev,
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t __iomem *gtt_entries =
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
450,7 → 549,7
 
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
i++;
}
 
461,8 → 560,8
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1])
!= gen6_pte_encode(dev, addr, level));
WARN_ON(readl(&gtt_entries[i-1]) !=
vm->pte_encode(addr, level));
 
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
472,11 → 571,11
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
 
static void gen6_ggtt_clear_range(struct drm_device *dev,
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
488,8 → 587,7
if (num_entries > max_entries)
num_entries = max_entries;
 
scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
I915_CACHE_LLC);
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
496,7 → 594,7
}
 
 
static void i915_ggtt_insert_entries(struct drm_device *dev,
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level)
508,7 → 606,7
 
}
 
static void i915_ggtt_clear_range(struct drm_device *dev,
static void i915_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
521,9 → 619,10
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 
dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
entry,
cache_level);
 
obj->has_global_gtt_mapping = 1;
533,9 → 632,10
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 
dev_priv->gtt.gtt_clear_range(obj->base.dev,
obj->gtt_space->start >> PAGE_SHIFT,
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
entry,
obj->base.size >> PAGE_SHIFT);
 
obj->has_global_gtt_mapping = 0;
587,7 → 687,8
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
595,37 → 696,38
BUG_ON(mappable_end > end);
 
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
 
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
obj->gtt_offset, obj->base.size);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
int ret;
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
obj->gtt_offset,
obj->base.size,
false);
WARN_ON(i915_gem_obj_ggtt_bound(obj));
ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
list_add(&vma->vma_link, &obj->vma_list);
}
 
dev_priv->gtt.start = start;
dev_priv->gtt.total = end - start;
dev_priv->gtt.base.start = start;
dev_priv->gtt.base.total = end - start;
 
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
hole_start, hole_end) {
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
(hole_end-hole_start) / PAGE_SIZE);
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
}
 
/* And finally clear the reserved guard page */
dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
}
 
static bool
648,7 → 750,7
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
 
gtt_size = dev_priv->gtt.total;
gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
 
#if 0
658,10 → 760,10
if (INTEL_INFO(dev)->gen <= 7) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
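		/* i.e. 512 page-directory entries * 4 KiB == 2 MiB of GTT
		 * PTE space set aside for the aliasing PPGTT (assuming the
		 * usual 4 KiB PAGE_SIZE). */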
}
 
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size);
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size-LFB_SIZE);
 
ret = i915_gem_init_aliasing_ppgtt(dev);
if (!ret)
668,12 → 770,12
return;
 
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
drm_mm_takedown(&dev_priv->mm.gtt_space);
gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
drm_mm_takedown(&dev_priv->gtt.base.mm);
gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
#endif
 
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size);
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size-LFB_SIZE);
}
 
static int setup_scratch_page(struct drm_device *dev)
696,8 → 798,8
#else
dma_addr = page_to_phys(page);
#endif
dev_priv->gtt.scratch_page = page;
dev_priv->gtt.scratch_page_dma = dma_addr;
dev_priv->gtt.base.scratch.page = page;
dev_priv->gtt.base.scratch.addr = dma_addr;
 
return 0;
}
705,11 → 807,13
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
set_pages_wb(dev_priv->gtt.scratch_page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
struct page *page = dev_priv->gtt.base.scratch.page;
 
set_pages_wb(page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(dev_priv->gtt.scratch_page);
__free_page(dev_priv->gtt.scratch_page);
put_page(page);
__free_page(page);
}
 
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
772,17 → 876,18
if (ret)
DRM_ERROR("Scratch setup failed\n");
 
dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
 
return ret;
}
 
static void gen6_gmch_remove(struct drm_device *dev)
static void gen6_gmch_remove(struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = dev->dev_private;
iounmap(dev_priv->gtt.gsm);
teardown_scratch_page(dev_priv->dev);
 
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
 
static int i915_gmch_probe(struct drm_device *dev,
803,13 → 908,13
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
 
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
return 0;
}
 
static void i915_gmch_remove(struct drm_device *dev)
static void i915_gmch_remove(struct i915_address_space *vm)
{
// intel_gmch_remove();
}
821,27 → 926,35
int ret;
 
if (INTEL_INFO(dev)->gen <= 5) {
dev_priv->gtt.gtt_probe = i915_gmch_probe;
dev_priv->gtt.gtt_remove = i915_gmch_remove;
gtt->gtt_probe = i915_gmch_probe;
gtt->base.cleanup = i915_gmch_remove;
} else {
dev_priv->gtt.gtt_probe = gen6_gmch_probe;
dev_priv->gtt.gtt_remove = gen6_gmch_remove;
gtt->gtt_probe = gen6_gmch_probe;
gtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
gtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
gtt->base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
gtt->base.pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
gtt->base.pte_encode = ivb_pte_encode;
else
gtt->base.pte_encode = snb_pte_encode;
}
 
ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
&dev_priv->gtt.stolen_size,
&gtt->mappable_base,
&gtt->mappable_end);
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
&gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;
 
gtt->base.dev = dev;
 
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n",
dev_priv->gtt.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
dev_priv->gtt.mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
dev_priv->gtt.stolen_size >> 20);
gtt->base.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
 
return 0;
}
/drivers/video/drm/i915/i915_gem_stolen.c
45,46 → 45,48
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
struct resource *r;
u32 base;
 
/* On the machines I have tested the Graphics Base of Stolen Memory
* is unreliable, so on those compute the base by subtracting the
* stolen memory from the Top of Low Usable DRAM which is where the
* BIOS places the graphics stolen memory.
/* Almost universally we can find the Graphics Base of Stolen Memory
* at offset 0x5c in the igfx configuration space. On a few (desktop)
* machines this is also mirrored in the bridge device at different
* locations, or in the MCHBAR. On gen2, the layout is again slightly
* different with the Graphics Segment immediately following Top of
* Memory (or Top of Usable DRAM). Note it appears that TOUD is only
* reported by 865g, so we just use the top of memory as determined
* by the e820 probe.
*
* On gen2, the layout is slightly different with the Graphics Segment
* immediately following Top of Memory (or Top of Usable DRAM). Note
* it appears that TOUD is only reported by 865g, so we just use the
* top of memory as determined by the e820 probe.
*
* XXX gen2 requires an unavailable symbol and 945gm fails with
* its value of TOLUD.
* XXX However gen2 requires an unavailable symbol.
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 6) {
/* Read Base Data of Stolen Memory Register (BDSM) directly.
* Note that there is also a MCHBAR mirror at 0x1080c0 or
* we could use device 2:0x5c instead.
*/
pci_read_config_dword(pdev, 0xB0, &base);
base &= ~4095; /* lower bits used for locking register */
} else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
if (INTEL_INFO(dev)->gen >= 3) {
/* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(pdev, 0xA4, &base);
pci_read_config_dword(dev->pdev, 0x5c, &base);
base &= ~((1<<20) - 1);
} else { /* GEN2 */
#if 0
} else if (IS_GEN3(dev)) {
u8 val;
/* Stolen is immediately below Top of Low Usable DRAM */
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
base -= dev_priv->mm.gtt->stolen_size;
} else {
/* Stolen is immediately above Top of Memory */
base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
}
 
if (base == 0)
return 0;
#if 0
/* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)dev_priv->gtt.stolen_size);
base = 0;
}
#endif
return base;
}
 
92,33 → 94,38
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
int ret;
 
/* Try to over-allocate to reduce reallocations and fragmentation */
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
size <<= 1, 4096, 0);
compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
if (!compressed_fb)
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
size >>= 1, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
goto err_llb;
 
/* Try to over-allocate to reduce reallocations and fragmentation */
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret)
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
size >>= 1, 4096,
DRM_MM_SEARCH_DEFAULT);
if (ret)
goto err_llb;
 
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
compressed_llb = drm_mm_get_block(compressed_llb,
4096, 4096);
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;
 
dev_priv->compressed_llb = compressed_llb;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
4096, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret)
goto err_fb;
 
dev_priv->fbc.compressed_llb = compressed_llb;
 
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + compressed_fb->start);
I915_WRITE(FBC_LL_BASE,
125,8 → 132,8
dev_priv->mm.stolen_base + compressed_llb->start);
}
 
dev_priv->compressed_fb = compressed_fb;
dev_priv->cfb_size = size;
dev_priv->fbc.compressed_fb = compressed_fb;
dev_priv->fbc.size = size;
 
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
134,8 → 141,11
return 0;
 
err_fb:
drm_mm_put_block(compressed_fb);
err:
kfree(compressed_llb);
drm_mm_remove_node(compressed_fb);
err_llb:
kfree(compressed_fb);
// pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
 
143,10 → 153,10
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->mm.stolen_base == 0)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
 
if (size < dev_priv->cfb_size)
if (size < dev_priv->fbc.size)
return 0;
 
/* Release any current block */
159,16 → 169,20
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->cfb_size == 0)
if (dev_priv->fbc.size == 0)
return;
 
if (dev_priv->compressed_fb)
drm_mm_put_block(dev_priv->compressed_fb);
if (dev_priv->fbc.compressed_fb) {
drm_mm_remove_node(dev_priv->fbc.compressed_fb);
kfree(dev_priv->fbc.compressed_fb);
}
 
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
}
 
dev_priv->cfb_size = 0;
dev_priv->fbc.size = 0;
}
 
void i915_gem_cleanup_stolen(struct drm_device *dev)
175,6 → 189,9
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
 
i915_gem_stolen_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
}
182,7 → 199,11
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
 
if (dev_priv->gtt.stolen_size == 0)
return 0;
 
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
190,8 → 211,15
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
 
if (IS_VALLEYVIEW(dev))
bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
 
if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
return 0;
 
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
bios_reserved);
 
return 0;
}
259,9 → 287,7
if (obj == NULL)
return NULL;
 
if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
goto cleanup;
 
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
obj->pages = i915_pages_create_for_stolen(dev,
270,12 → 296,11
goto cleanup;
 
obj->has_dma_mapping = true;
obj->pages_pin_count = 1;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
 
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->cache_level = I915_CACHE_NONE;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
 
return obj;
 
290,8 → 315,9
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
 
if (dev_priv->mm.stolen_base == 0)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
 
DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
298,17 → 324,23
if (size == 0)
return NULL;
 
stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (stolen)
stolen = drm_mm_get_block(stolen, size, 4096);
if (stolen == NULL)
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return NULL;
 
ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
4096, DRM_MM_SEARCH_DEFAULT);
if (ret) {
kfree(stolen);
return NULL;
}
 
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj)
return obj;
 
drm_mm_put_block(stolen);
drm_mm_remove_node(stolen);
kfree(stolen);
return NULL;
}
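 
#if 0
/* Editor's note: a minimal sketch (not part of this revision) of the drm_mm
 * node lifecycle this change migrates to. The old drm_mm_get_block()/
 * drm_mm_put_block() pair is replaced by caller-owned nodes: kzalloc() the
 * node, drm_mm_insert_node() to claim space, drm_mm_remove_node() + kfree()
 * to release it. Allocator and alignment below mirror the calls above.
 */
static struct drm_mm_node *example_stolen_alloc(struct drm_i915_private *dev_priv,
						u32 size)
{
	struct drm_mm_node *node;
	int ret;
 
	node = kzalloc(sizeof(*node), GFP_KERNEL);	/* caller owns the node */
	if (!node)
		return NULL;
 
	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size,
				 4096, DRM_MM_SEARCH_DEFAULT);
	if (ret) {					/* no free hole of that size */
		kfree(node);
		return NULL;
	}
	return node;	/* release with drm_mm_remove_node(node); kfree(node); */
}
#endif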
 
319,10 → 351,13
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
int ret;
 
if (dev_priv->mm.stolen_base == 0)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
 
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
330,17 → 365,21
 
/* KISS and expect everything to be page-aligned */
BUG_ON(stolen_offset & 4095);
BUG_ON(gtt_offset & 4095);
BUG_ON(size & 4095);
 
if (WARN_ON(size == 0))
return NULL;
 
stolen = drm_mm_create_block(&dev_priv->mm.stolen,
stolen_offset, size,
false);
if (stolen == NULL) {
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return NULL;
 
stolen->start = stolen_offset;
stolen->size = size;
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
kfree(stolen);
return NULL;
}
 
347,34 → 386,50
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
drm_mm_put_block(stolen);
drm_mm_remove_node(stolen);
kfree(stolen);
return NULL;
}
 
/* Some objects just need physical mem from stolen space */
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
 
vma = i915_gem_vma_create(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
}
 
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur
* later.
*/
if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
gtt_offset, size,
false);
if (obj->gtt_space == NULL) {
vma->node.start = gtt_offset;
vma->node.size = size;
if (drm_mm_initialized(&ggtt->mm)) {
ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
drm_gem_object_unreference(&obj->base);
return NULL;
goto err_vma;
}
} else
obj->gtt_space = I915_GTT_RESERVED;
}
 
obj->gtt_offset = gtt_offset;
obj->has_global_gtt_mapping = 1;
 
list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
 
return obj;
 
err_vma:
i915_gem_vma_destroy(vma);
err_out:
drm_mm_remove_node(stolen);
kfree(stolen);
drm_gem_object_unreference(&obj->base);
return NULL;
}
 
void
381,7 → 436,8
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
drm_mm_put_block(obj->stolen);
drm_mm_remove_node(obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
}
/drivers/video/drm/i915/i915_gem_tiling.c
287,18 → 287,18
return true;
 
if (INTEL_INFO(obj->base.dev)->gen == 3) {
if (obj->gtt_offset & ~I915_FENCE_START_MASK)
if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
if (obj->gtt_offset & ~I830_FENCE_START_MASK)
if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
}
 
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
if (obj->gtt_space->size != size)
if (i915_gem_obj_ggtt_size(obj) != size)
return false;
 
if (obj->gtt_offset & (size - 1))
if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
return false;
 
return true;
378,18 → 378,19
*/
 
obj->map_and_fenceable =
obj->gtt_space == NULL ||
(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
!i915_gem_obj_ggtt_bound(obj) ||
(i915_gem_obj_ggtt_offset(obj) +
obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
 
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
u32 unfenced_align =
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
ret = i915_gem_object_ggtt_unbind(obj);
}
 
if (ret == 0) {
/drivers/video/drm/i915/i915_irq.c
35,6 → 35,8
#include "i915_trace.h"
#include "intel_drv.h"
 
#define assert_spin_locked(a)
 
static const u32 hpd_ibx[] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
69,15 → 71,6
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
 
static const u32 hpd_status_i965[] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
 
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
87,8 → 80,6
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
 
static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
105,6 → 96,14
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.deimr &= ~mask;
return;
}
 
if ((dev_priv->irq_mask & mask) != 0) {
dev_priv->irq_mask &= ~mask;
I915_WRITE(DEIMR, dev_priv->irq_mask);
115,6 → 114,14
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.deimr |= mask;
return;
}
 
if ((dev_priv->irq_mask & mask) != mask) {
dev_priv->irq_mask |= mask;
I915_WRITE(DEIMR, dev_priv->irq_mask);
122,6 → 129,330
}
}
 
/**
* ilk_update_gt_irq - update GTIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
interrupt_mask);
return;
}
 
dev_priv->gt_irq_mask &= ~interrupt_mask;
dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
 
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
}
 
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, 0);
}
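 
#if 0
/* Editor's sketch: the two-mask update shared by ilk_update_gt_irq(),
 * snb_update_pm_irq() and ibx_display_interrupt_update(). IMR registers are
 * *mask* registers: a set bit disables the interrupt. Only the bits named in
 * interrupt_mask are touched; of those, the bits also set in enabled_irq_mask
 * end up unmasked.
 */
static u32 example_update_imr(u32 imr, u32 interrupt_mask, u32 enabled_irq_mask)
{
	imr &= ~interrupt_mask;				/* clear every bit we own */
	imr |= ~enabled_irq_mask & interrupt_mask;	/* re-set the ones to keep masked */
	return imr;
	/* e.g. imr = 0xF, interrupt_mask = 0x6, enabled_irq_mask = 0x2
	 *   -> 0xD: bit 1 unmasked (enabled), bit 2 still masked,
	 *      bits 0 and 3 left untouched. */
}
#endif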
 
/**
* snb_update_pm_irq - update GEN6_PMIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
uint32_t new_val;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
interrupt_mask);
return;
}
 
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
 
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
POSTING_READ(GEN6_PMIMR);
}
}
 
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, mask);
}
 
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
 
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
enum pipe pipe;
 
assert_spin_locked(&dev_priv->irq_lock);
 
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
if (crtc->cpu_fifo_underrun_disabled)
return false;
}
 
return true;
}
 
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
struct intel_crtc *crtc;
 
assert_spin_locked(&dev_priv->irq_lock);
 
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
if (crtc->pch_fifo_underrun_disabled)
return false;
}
 
return true;
}
 
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
DE_PIPEB_FIFO_UNDERRUN;
 
if (enable)
ironlake_enable_display_irq(dev_priv, bit);
else
ironlake_disable_display_irq(dev_priv, bit);
}
 
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
 
if (!ivb_can_enable_err_int(dev))
return;
 
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
 
/* Change the state _after_ we've read out the current one. */
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
 
if (!was_enabled &&
(I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
pipe_name(pipe));
}
}
}
 
/**
* ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
uint32_t sdeimr = I915_READ(SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled &&
(interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
interrupt_mask);
return;
}
 
I915_WRITE(SDEIMR, sdeimr);
POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), 0)
 
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
if (enable)
ibx_enable_display_interrupt(dev_priv, bit);
else
ibx_disable_display_interrupt(dev_priv, bit);
}
 
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (enable) {
I915_WRITE(SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
 
if (!cpt_can_enable_serr_int(dev))
return;
 
ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
uint32_t tmp = I915_READ(SERR_INT);
bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
 
/* Change the state _after_ we've read out the current one. */
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
 
if (!was_enabled &&
(tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
transcoder_name(pch_transcoder));
}
}
}
 
/**
* intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
* @dev: drm device
* @pipe: pipe
* @enable: true if we want to report FIFO underrun errors, false otherwise
*
* This function makes us disable or enable CPU fifo underruns for a specific
* pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
* reporting for one pipe may also disable all the other CPU error interrupts for
* the other pipes, due to the fact that there's just one interrupt mask/enable
* bit for all the pipes.
*
* Returns the previous state of underrun reporting.
*/
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool ret;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
ret = !intel_crtc->cpu_fifo_underrun_disabled;
 
if (enable == ret)
goto done;
 
intel_crtc->cpu_fifo_underrun_disabled = !enable;
 
if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
 
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return ret;
}
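 
#if 0
/* Editor's sketch: hypothetical caller (not taken from this file) showing how
 * the returned previous state is used to silence underrun reporting around an
 * operation that is expected to underrun, then restore the old setting.
 */
{
	bool was_enabled;
 
	was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 
	/* ... reconfigure the pipe; transient underruns are expected here ... */
 
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, was_enabled);
}
#endif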
 
/**
* intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
* @dev: drm device
* @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
* @enable: true if we want to report FIFO underrun errors, false otherwise
*
* This function makes us disable or enable PCH fifo underruns for a specific
* PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
* underrun reporting for one transcoder may also disable all the other PCH
* error interrupts for the other transcoders, due to the fact that there's just
* one interrupt mask/enable bit for all the transcoders.
*
* Returns the previous state of underrun reporting.
*/
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool ret;
 
/*
* NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
* has only one pch transcoder A that all pipes can use. To avoid racy
* pch transcoder -> pipe lookups from interrupt code simply store the
* underrun statistics in crtc A. Since we never expose this anywhere
* nor use it outside of the fifo underrun code here, using the "wrong"
* crtc on LPT won't cause issues.
*/
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
ret = !intel_crtc->pch_fifo_underrun_disabled;
 
if (enable == ret)
goto done;
 
intel_crtc->pch_fifo_underrun_disabled = !enable;
 
if (HAS_PCH_IBX(dev))
ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
else
cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
 
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return ret;
}
 
 
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
128,6 → 459,8
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if ((pipestat & mask) == mask)
return;
 
143,6 → 476,8
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if ((pipestat & mask) == 0)
return;
 
153,28 → 488,21
 
#if 0
/**
* intel_enable_asle - enable ASLE interrupt for OpRegion
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
*/
void intel_enable_asle(struct drm_device *dev)
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
 
/* FIXME: opregion/asle for VLV */
if (IS_VALLEYVIEW(dev))
if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
return;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
else {
i915_enable_pipestat(dev_priv, 1,
PIPE_LEGACY_BLC_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
193,11 → 521,17
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
 
return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Locking is horribly broken here, but whatever. */
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
return intel_crtc->active;
} else {
return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
}
 
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
346,6 → 680,21
crtc);
}
 
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
enum drm_connector_status old_status;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
old_status = connector->status;
 
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
connector->base.id,
drm_get_connector_name(connector),
old_status, connector->status);
return (old_status != connector->status);
}
 
/*
* Handle hotplug events outside the interrupt handler proper.
*/
362,6 → 711,8
struct drm_connector *connector;
unsigned long irqflags;
bool hpd_disabled = false;
bool changed = false;
u32 hpd_event_bits;
 
/* HPD irq before everything is fully set up. */
if (!dev_priv->enable_hotplug_processing)
371,6 → 722,9
DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
hpd_event_bits = dev_priv->hpd_event_bits;
dev_priv->hpd_event_bits = 0;
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
intel_encoder = intel_connector->encoder;
385,7 → 739,11
| DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
}
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
drm_get_connector_name(connector), intel_encoder->hpd_pin);
}
}
/* if there were no outputs to poll, poll was disabled,
* therefore make sure it's enabled when disabling HPD on
* some connectors */
397,24 → 755,29
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
intel_encoder = intel_connector->encoder;
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
if (intel_encoder->hot_plug)
intel_encoder->hot_plug(intel_encoder);
 
if (intel_hpd_irq_event(dev, connector))
changed = true;
}
}
mutex_unlock(&mode_config->mutex);
 
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
if (changed)
drm_kms_helper_hotplug_event(dev);
}
 
static void ironlake_handle_rps_change(struct drm_device *dev)
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
unsigned long flags;
 
spin_lock_irqsave(&mchdev_lock, flags);
spin_lock(&mchdev_lock);
 
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 
442,7 → 805,7
if (ironlake_set_drps(dev, new_delay))
dev_priv->ips.cur_delay = new_delay;
 
spin_unlock_irqrestore(&mchdev_lock, flags);
spin_unlock(&mchdev_lock);
 
return;
}
450,8 → 813,6
static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (ring->obj == NULL)
return;
 
458,12 → 819,6
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
 
wake_up_all(&ring->irq_queue);
// if (i915_enable_hangcheck) {
// dev_priv->hangcheck_count = 0;
// mod_timer(&dev_priv->hangcheck_timer,
// jiffies +
// msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
// }
}
 
#if 0
471,34 → 826,59
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.work);
u32 pm_iir, pm_imr;
u32 pm_iir;
u8 new_delay;
 
spin_lock_irq(&dev_priv->rps.lock);
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps.lock);
/* Make sure not to corrupt PMIMR state used by ringbuffer code */
snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
spin_unlock_irq(&dev_priv->irq_lock);
 
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
/* Make sure we didn't queue anything we're not going to process. */
WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
 
if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
return;
 
mutex_lock(&dev_priv->rps.hw_lock);
 
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
new_delay = dev_priv->rps.cur_delay + 1;
else
 
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
if (IS_VALLEYVIEW(dev_priv->dev) &&
dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
new_delay = dev_priv->rps.rpe_delay;
} else
new_delay = dev_priv->rps.cur_delay - 1;
 
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
if (!(new_delay > dev_priv->rps.max_delay ||
new_delay < dev_priv->rps.min_delay)) {
if (new_delay >= dev_priv->rps.min_delay &&
new_delay <= dev_priv->rps.max_delay) {
if (IS_VALLEYVIEW(dev_priv->dev))
valleyview_set_rps(dev_priv->dev, new_delay);
else
gen6_set_rps(dev_priv->dev, new_delay);
}
 
if (IS_VALLEYVIEW(dev_priv->dev)) {
/*
* On VLV, when we enter RC6 we may not be at the minimum
* voltage level, so arm a timer to check. It should only
* fire when there's activity or once after we've entered
* RC6, and then won't be re-armed until the next RPS interrupt.
*/
mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
msecs_to_jiffies(100));
}
 
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
543,13 → 923,12
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
mutex_unlock(&dev_priv->dev->struct_mutex);
 
parity_event[0] = "L3_PARITY_ERROR=1";
parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
566,18 → 945,16
kfree(parity_event[1]);
}
 
static void ivybridge_handle_parity_error(struct drm_device *dev)
static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long flags;
 
if (!HAS_L3_GPU_CACHE(dev))
return;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
spin_lock(&dev_priv->irq_lock);
ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
spin_unlock(&dev_priv->irq_lock);
 
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
584,24 → 961,35
 
#endif
 
static void ilk_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
}
 
static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
{
 
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & GEN6_BSD_USER_INTERRUPT)
if (gt_iir & GT_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
if (gt_iir & GT_BLT_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
 
if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
GT_GEN6_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_ERROR_INTERRUPT)) {
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
i915_handle_error(dev, false);
// i915_handle_error(dev, false);
}
 
// if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
608,50 → 996,32
// ivybridge_handle_parity_error(dev);
}
 
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
u32 pm_iir)
{
unsigned long flags;
 
/*
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
* dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic.
*
* The mask bit in IMR is cleared by dev_priv->rps.work.
*/
 
spin_lock_irqsave(&dev_priv->rps.lock, flags);
dev_priv->rps.pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
// queue_work(dev_priv->wq, &dev_priv->rps.work);
}
 
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
 
static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
const u32 *hpd)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
int i;
bool ret = false;
bool storm_detected = false;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (!hotplug_trigger)
return;
 
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
 
WARN(((hpd[i] & hotplug_trigger) &&
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
"Received HPD interrupt although disabled\n");
 
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
 
dev_priv->hpd_event_bits |= (1 << i);
// if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
// dev_priv->hpd_stats[i].hpd_last_jiffies
// + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
666,9 → 1036,11
// }
}
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
if (storm_detected)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock(&dev_priv->irq_lock);
 
return ret;
 
}
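 
#if 0
/* Editor's sketch of the storm detection stubbed out above (an assumption
 * about the intended logic, kept disabled here): count hotplug interrupts per
 * pin inside HPD_STORM_DETECT_PERIOD and, once HPD_STORM_THRESHOLD is
 * exceeded, mark the pin disabled so it is polled instead of interrupting.
 * hpd_cnt/hpd_last_jiffies/HPD_MARK_DISABLED follow the hpd_stats[] usage
 * elsewhere in the driver; GetTimerTicks() matches the commented-out code.
 */
	if (time_after(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies +
			msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
		dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks();
		dev_priv->hpd_stats[i].hpd_cnt = 0;
	} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
		dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
		dev_priv->hpd_event_bits &= ~(1 << i);
		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
		storm_detected = true;
	} else {
		dev_priv->hpd_stats[i].hpd_cnt++;
	}
#endif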
 
static void gmbus_irq_handler(struct drm_device *dev)
685,6 → 1057,31
wake_up_all(&dev_priv->gmbus_wait_queue);
}
 
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if (pm_iir & GEN6_PM_RPS_EVENTS) {
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
spin_unlock(&dev_priv->irq_lock);
 
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
 
if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
// i915_handle_error(dev_priv->dev, false);
}
}
}
 
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
745,12 → 1142,9
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
}
 
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
776,15 → 1170,14
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
ibx_hpd_irq_setup(dev);
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
 
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
port_name(port));
}
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
 
if (pch_iir & SDE_AUX_MASK)
dp_aux_irq_handler(dev);
813,12 → 1206,66
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
 
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
 
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}
 
static void ivb_err_int_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 err_int = I915_READ(GEN7_ERR_INT);
 
if (err_int & ERR_INT_POISON)
DRM_ERROR("Poison interrupt\n");
 
if (err_int & ERR_INT_FIFO_UNDERRUN_A)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
 
if (err_int & ERR_INT_FIFO_UNDERRUN_B)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
 
if (err_int & ERR_INT_FIFO_UNDERRUN_C)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
 
I915_WRITE(GEN7_ERR_INT, err_int);
}
 
static void cpt_serr_int_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 serr_int = I915_READ(SERR_INT);
 
if (serr_int & SERR_INT_POISON)
DRM_ERROR("PCH poison interrupt\n");
 
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
 
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
 
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
false))
DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
 
I915_WRITE(SERR_INT, serr_int);
}
 
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
825,15 → 1272,14
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
ibx_hpd_irq_setup(dev);
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
 
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
port_name(port));
}
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
 
if (pch_iir & SDE_AUX_MASK_CPT)
dp_aux_irq_handler(dev);
852,142 → 1298,21
DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
}
 
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
int i;
 
atomic_inc(&dev_priv->irq_received);
 
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
if (!HAS_PCH_NOP(dev)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);
if (pch_iir & SDE_ERROR_CPT)
cpt_serr_int_handler(dev);
}
 
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
snb_gt_irq_handler(dev, dev_priv, gt_iir);
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
}
 
de_iir = I915_READ(DEIIR);
if (de_iir) {
if (de_iir & DE_AUX_CHANNEL_A_IVB)
dp_aux_irq_handler(dev);
#if 0
if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev);
 
for (i = 0; i < 3; i++) {
if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
drm_handle_vblank(dev, i);
if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
}
}
#endif
/* check event from PCH */
if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = I915_READ(SDEIIR);
 
cpt_irq_handler(dev, pch_iir);
 
/* clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
}
 
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
}
 
pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
// if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
// gen6_queue_rps_work(dev_priv, pm_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
}
 
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
if (!HAS_PCH_NOP(dev)) {
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);
}
 
return ret;
}
 
static void ilk_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
}
struct drm_i915_private *dev_priv = dev->dev_private;
 
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
 
atomic_inc(&dev_priv->irq_received);
 
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
POSTING_READ(DEIER);
 
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);
 
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pm_iir = I915_READ(GEN6_PMIIR);
 
if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
goto done;
 
ret = IRQ_HANDLED;
 
if (IS_GEN5(dev))
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
else
snb_gt_irq_handler(dev, dev_priv, gt_iir);
 
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
 
#if 0
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
intel_opregion_asle_intr(dev);
 
if (de_iir & DE_PIPEA_VBLANK)
drm_handle_vblank(dev, 0);
995,6 → 1320,18
if (de_iir & DE_PIPEB_VBLANK)
drm_handle_vblank(dev, 1);
 
if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n");
#endif
 
if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
 
if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
#if 0
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip_plane(dev, 0);
1018,556 → 1355,234
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
}
#if 0
 
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
ironlake_handle_rps_change(dev);
 
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
#endif
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
 
done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);
 
return ret;
ironlake_rps_change_irq_handler(dev);
}
 
 
 
 
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
uint32_t *instdone)
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
 
switch(INTEL_INFO(dev)->gen) {
case 2:
case 3:
instdone[0] = I915_READ(INSTDONE);
break;
case 4:
case 5:
case 6:
instdone[0] = I915_READ(INSTDONE_I965);
instdone[1] = I915_READ(INSTDONE1);
break;
default:
WARN_ONCE(1, "Unsupported platform\n");
case 7:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
break;
}
}
 
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
const int num_pages)
{
struct drm_i915_error_object *dst;
int i;
u32 reloc_offset;
 
if (src == NULL || src->pages == NULL)
return NULL;
// if (de_iir & DE_ERR_INT_IVB)
// ivb_err_int_handler(dev);
 
dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
if (de_iir & DE_AUX_CHANNEL_A_IVB)
dp_aux_irq_handler(dev);
 
reloc_offset = src->gtt_offset;
for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;
 
d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
if (d == NULL)
goto unwind;
 
local_irq_save(flags);
if (reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping) {
void __iomem *s;
 
/* Simply ignore tiling or any overlapping fence.
* It's part of the error state, and this hopefully
* captures what the GPU read.
*/
 
s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else if (src->stolen) {
unsigned long offset;
 
offset = dev_priv->mm.stolen_base;
offset += src->stolen->start;
offset += i << PAGE_SHIFT;
 
memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
} else {
struct page *page;
void *s;
 
page = i915_gem_object_get_page(src, i);
 
drm_clflush_pages(&page, 1);
 
s = kmap_atomic(page);
memcpy(d, s, PAGE_SIZE);
kunmap_atomic(s);
 
drm_clflush_pages(&page, 1);
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev);
#if 0
for (i = 0; i < 3; i++) {
if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
drm_handle_vblank(dev, i);
if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
}
local_irq_restore(flags);
 
dst->pages[i] = d;
 
reloc_offset += PAGE_SIZE;
}
dst->page_count = num_pages;
dst->gtt_offset = src->gtt_offset;
#endif
 
return dst;
/* check event from PCH */
if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = I915_READ(SDEIIR);
 
unwind:
while (i--)
kfree(dst->pages[i]);
kfree(dst);
return NULL;
}
#define i915_error_object_create(dev_priv, src) \
i915_error_object_create_sized((dev_priv), (src), \
(src)->base.size>>PAGE_SHIFT)
cpt_irq_handler(dev, pch_iir);
 
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
int page;
 
if (obj == NULL)
return;
 
for (page = 0; page < obj->page_count; page++)
kfree(obj->pages[page]);
 
kfree(obj);
/* clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
}
 
void
i915_error_state_free(struct kref *error_ref)
{
struct drm_i915_error_state *error = container_of(error_ref,
typeof(*error), ref);
int i;
 
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
kfree(error->ring[i].requests);
}
 
kfree(error->active_bo);
kfree(error->overlay);
kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
struct drm_i915_gem_object *obj)
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
err->size = obj->base.size;
err->name = obj->base.name;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
err->gtt_offset = obj->gtt_offset;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
err->pinned = 0;
if (obj->pin_count > 0)
err->pinned = 1;
if (obj->user_pin_count > 0)
err->pinned = -1;
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->ring = obj->ring ? obj->ring->id : -1;
err->cache_level = obj->cache_level;
}
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
bool err_int_reenable = false;
 
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
int i = 0;
atomic_inc(&dev_priv->irq_received);
 
list_for_each_entry(obj, head, mm_list) {
capture_bo(err++, obj);
if (++i == count)
break;
}
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
 
return i;
}
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
POSTING_READ(DEIER);
 
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
int i = 0;
 
list_for_each_entry(obj, head, gtt_list) {
if (obj->pin_count == 0)
continue;
 
capture_bo(err++, obj);
if (++i == count)
break;
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
if (!HAS_PCH_NOP(dev)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);
}
 
return i;
/* On Haswell, also mask ERR_INT because we don't want to risk
* generating "unclaimed register" interrupts from inside the interrupt
* handler. */
if (IS_HASWELL(dev)) {
spin_lock(&dev_priv->irq_lock);
err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
if (err_int_reenable)
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}
 
static void i915_gem_record_fences(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
 
default:
BUG();
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
if (INTEL_INFO(dev)->gen >= 6)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
}
}
 
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
u32 seqno;
 
if (!ring->get_seqno)
return NULL;
 
if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
u32 acthd = I915_READ(ACTHD);
 
if (WARN_ON(ring->id != RCS))
return NULL;
 
obj = ring->private;
if (acthd >= obj->gtt_offset &&
acthd < obj->gtt_offset + obj->base.size)
return i915_error_object_create(dev_priv, obj);
de_iir = I915_READ(DEIIR);
if (de_iir) {
if (INTEL_INFO(dev)->gen >= 7)
ivb_display_irq_handler(dev, de_iir);
else
ilk_display_irq_handler(dev, de_iir);
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
}
 
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
continue;
 
if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
 
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
continue;
 
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
return i915_error_object_create(dev_priv, obj);
if (INTEL_INFO(dev)->gen >= 6) {
u32 pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
gen6_rps_irq_handler(dev_priv, pm_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
}
 
return NULL;
}
 
static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_error_state *error,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (INTEL_INFO(dev)->gen >= 6) {
error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
if (err_int_reenable) {
spin_lock(&dev_priv->irq_lock);
if (ivb_can_enable_err_int(dev))
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}
 
if (INTEL_INFO(dev)->gen >= 4) {
error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
error->ipehr[ring->id] = I915_READ(IPEHR);
error->instdone[ring->id] = I915_READ(INSTDONE);
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
if (!HAS_PCH_NOP(dev)) {
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);
}
 
error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
error->seqno[ring->id] = ring->get_seqno(ring, false);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
error->ctl[ring->id] = I915_READ_CTL(ring);
 
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
return ret;
}
 
 
static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
 
/* Currently render ring is the only HW context user */
if (ring->id != RCS || !error->ccid)
return;
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
ering->ctx = i915_error_object_create_sized(dev_priv,
obj, 1);
}
}
}
 
static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
int i;
 
for_each_ring(ring, dev_priv, i) {
i915_record_ring_state(dev, error, ring);
/*
* Notify all waiters for GPU completion events that reset state has
* been changed, and that they need to restart their wait after
* checking for potential errors (and bail out to drop locks if there is
* a gpu reset pending so that i915_error_work_func can acquire them).
*/
 
error->ring[i].batchbuffer =
i915_error_first_batchbuffer(dev_priv, ring);
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
 
error->ring[i].ringbuffer =
i915_error_object_create(dev_priv, ring->obj);
 
 
i915_gem_record_active_context(ring, error, &error->ring[i]);
 
count = 0;
list_for_each_entry(request, &ring->request_list, list)
count++;
 
error->ring[i].num_requests = count;
error->ring[i].requests =
kmalloc(count*sizeof(struct drm_i915_error_request),
GFP_ATOMIC);
if (error->ring[i].requests == NULL) {
error->ring[i].num_requests = 0;
continue;
/*
* Signal tasks blocked in i915_gem_wait_for_error that the pending
* reset state is cleared.
*/
if (reset_completed)
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
 
count = 0;
list_for_each_entry(request, &ring->request_list, list) {
struct drm_i915_error_request *erq;
 
erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies;
erq->tail = request->tail;
}
}
}
 
#if 0
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
* i915_error_work_func - do process context error handling work
* @work: work struct
*
* Should be called when an error is detected (either a hang or an error
* interrupt) to capture error state from the time of the error. Fills
* out a structure which becomes available in debugfs for user level tools
* to pick up.
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
static void i915_capture_error_state(struct drm_device *dev)
static void i915_error_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
unsigned long flags;
int i, pipe;
struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
work);
drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
gpu_error);
struct drm_device *dev = dev_priv->dev;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int ret;
 
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->gpu_error.first_error;
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
return;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
/* Account for pipe specific data like PIPE*STAT */
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
/*
* Note that there's only one work item which does gpu resets, so we
* need not worry about concurrent gpu resets potentially incrementing
* error->reset_counter twice. We only need to take care of another
* racing irq/hangcheck declaring the gpu dead for a second time. A
* quick check for that is good enough: schedule_work ensures the
* correct ordering between hang detection and this work item, and since
* the reset in-progress bit is only ever set by code outside of this
* work we don't need to worry about any other races.
*/
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
reset_event);
 
DRM_INFO("capturing error event; look for more information in "
"/sys/kernel/debug/dri/%d/i915_error_state\n",
dev->primary->index);
/*
* All state reset _must_ be completed before we update the
* reset counter, for otherwise waiters might miss the reset
* pending state and not properly drop locks, resulting in
* deadlocks with the reset work.
*/
ret = i915_reset(dev);
 
kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
if (HAS_HW_CONTEXTS(dev))
error->ccid = I915_READ(CCID);
intel_display_handle_reset(dev);
 
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
else if (IS_VALLEYVIEW(dev))
error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
else if (IS_GEN2(dev))
error->ier = I915_READ16(IER);
else
error->ier = I915_READ(IER);
if (ret == 0) {
/*
* After all the gem state is reset, increment the reset
* counter and wake up everyone waiting for the reset to
* complete.
*
* Since unlock operations are a one-sided barrier only,
* we need to insert a barrier here to order any seqno
* updates before
* the counter increment.
*/
smp_mb__before_atomic_inc();
atomic_inc(&dev_priv->gpu_error.reset_counter);
 
if (INTEL_INFO(dev)->gen >= 6)
error->derrmr = I915_READ(DERRMR);
 
if (IS_VALLEYVIEW(dev))
error->forcewake = I915_READ(FORCEWAKE_VLV);
else if (INTEL_INFO(dev)->gen >= 7)
error->forcewake = I915_READ(FORCEWAKE_MT);
else if (INTEL_INFO(dev)->gen == 6)
error->forcewake = I915_READ(FORCEWAKE);
 
if (!HAS_PCH_SPLIT(dev))
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
if (INTEL_INFO(dev)->gen >= 6) {
error->error = I915_READ(ERROR_GEN6);
error->done_reg = I915_READ(DONE_REG);
kobject_uevent_env(&dev->primary->kdev.kobj,
KOBJ_CHANGE, reset_done_event);
} else {
atomic_set(&error->reset_counter, I915_WEDGED);
}
 
if (INTEL_INFO(dev)->gen == 7)
error->err_int = I915_READ(GEN7_ERR_INT);
 
i915_get_extra_instdone(dev, error->extra_instdone);
 
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
 
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
error->pinned_bo = NULL;
 
i = 0;
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
 
error->active_bo = NULL;
error->pinned_bo = NULL;
if (i) {
error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
GFP_ATOMIC);
if (error->active_bo)
error->pinned_bo =
error->active_bo + error->active_bo_count;
/*
* Note: The wake_up also serves as a memory barrier so that
* waiters see the updated value of the reset counter atomic_t.
*/
i915_error_wake_up(dev_priv, true);
}
 
if (error->active_bo)
error->active_bo_count =
capture_active_bo(error->active_bo,
error->active_bo_count,
&dev_priv->mm.active_list);
 
if (error->pinned_bo)
error->pinned_bo_count =
capture_pinned_bo(error->pinned_bo,
error->pinned_bo_count,
&dev_priv->mm.bound_list);
 
do_gettimeofday(&error->time);
 
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
 
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
if (dev_priv->gpu_error.first_error == NULL) {
dev_priv->gpu_error.first_error = error;
error = NULL;
}
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
if (error)
i915_error_state_free(&error->ref);
}
 
void i915_destroy_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->gpu_error.first_error;
dev_priv->gpu_error.first_error = NULL;
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
if (error)
kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
 
static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1673,8 → 1688,6
void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i;
 
i915_capture_error_state(dev);
i915_report_and_clear_eir(dev);
1684,19 → 1697,30
&dev_priv->gpu_error.reset_counter);
 
/*
* Wakeup waiting processes so that the reset work item
* doesn't deadlock trying to grab various locks.
* Wakeup waiting processes so that the reset work function
* i915_error_work_func doesn't deadlock trying to grab various
* locks. By bumping the reset counter first, the woken
* processes will see a reset in progress and back off,
* releasing their locks and then wait for the reset completion.
* We must do this for _all_ gpu waiters that might hold locks
* that the reset work needs to acquire.
*
* Note: The wake_up serves as the required memory barrier to
* ensure that the waiters see the updated value of the reset
* counter atomic_t.
*/
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
i915_error_wake_up(dev_priv, false);
}
 
// queue_work(dev_priv->wq, &dev_priv->error_work);
/*
* Our reset work can grab modeset locks (since it needs to reset the
* state of outstanding pageflips). Hence it must not be run on our own
* dev-priv->wq work queue for otherwise the flush_work in the pageflip
* code will deadlock.
*/
schedule_work(&dev_priv->gpu_error.work);
}
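
/*
 * A minimal sketch, not part of the driver: the waiter side that the
 * ordering in i915_handle_error() above protects. Because the reset counter
 * is bumped before i915_error_wake_up(), a woken waiter re-checking the
 * error state sees a reset in progress and backs off (dropping its locks)
 * instead of going back to sleep with locks the reset work needs. The wait
 * loop around this check is simplified and assumed.
 */
#if 0
static int waiter_side_sketch(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;

	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error))
		return -EAGAIN;	/* drop locks and let the reset work run */

	return 0;		/* no reset pending, keep waiting */
}
#endif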
 
#if 0
 
 
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
1727,10 → 1751,10
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
obj->gtt_offset;
i915_gem_obj_ggtt_offset(obj);
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
1776,34 → 1800,19
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK_ILK(pipe);
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
ironlake_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
}
 
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv,
DE_PIPEA_VBLANK_IVB << (5 * pipe));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
}
 
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1849,24 → 1858,14
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK_ILK(pipe);
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
ironlake_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv,
DE_PIPEA_VBLANK_IVB << (pipe * 5));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1891,27 → 1890,224
return list_entry(ring->request_list.prev,
struct drm_i915_gem_request, list)->seqno;
}
/* drm_dma.h hooks
 
static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
 
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, acthd, acthd_min;
 
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if ((ipehr & ~(0x3 << 16)) !=
(MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
return NULL;
 
/* ACTHD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX.
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
acthd_min = max((int)acthd - 3 * 4, 0);
do {
cmd = ioread32(ring->virtual_start + acthd);
if (cmd == ipehr)
break;
 
acthd -= 4;
if (acthd < acthd_min)
return NULL;
} while (1);
 
*seqno = ioread32(ring->virtual_start+acthd+4)+1;
return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
 
static int semaphore_passed(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ring_buffer *signaller;
u32 seqno, ctl;
 
atomic_set(&dev_priv->irq_received, 0);
ring->hangcheck.deadlock = true;
 
I915_WRITE(HWSTAM, 0xeffe);
signaller = semaphore_waits_for(ring, &seqno);
if (signaller == NULL || signaller->hangcheck.deadlock)
return -1;
 
/* XXX hotplug from PCH */
/* cursory check for an unkickable deadlock */
ctl = I915_READ_CTL(signaller);
if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
return -1;
 
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
POSTING_READ(DEIER);
return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}
 
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_ring_buffer *ring;
int i;
 
for_each_ring(ring, dev_priv, i)
ring->hangcheck.deadlock = false;
}
 
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
 
if (ring->hangcheck.acthd != acthd)
return HANGCHECK_ACTIVE;
 
if (IS_GEN2(dev))
return HANGCHECK_HUNG;
 
/* Is the chip hanging on a WAIT_FOR_EVENT?
* If so we can simply poke the RB_WAIT bit
* and break the hang. This should work on
* all but the second generation chipsets.
*/
tmp = I915_READ_CTL(ring);
if (tmp & RING_WAIT) {
DRM_ERROR("Kicking stuck wait on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
}
 
if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(ring)) {
default:
return HANGCHECK_HUNG;
case 1:
DRM_ERROR("Kicking stuck semaphore on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
case 0:
return HANGCHECK_WAIT;
}
}
 
return HANGCHECK_HUNG;
}
 
/**
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track of per-ring seqno progress and,
* if there is no progress, the hangcheck score for that ring is increased.
* Further, acthd is inspected to see if the ring is stuck. If it is, we kick
* the ring. If we see no progress on three subsequent calls
* we assume the chip is wedged and try to fix it by resetting the chip.
*/
static void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30
 
if (!i915_enable_hangcheck)
return;
 
for_each_ring(ring, dev_priv, i) {
u32 seqno, acthd;
bool busy = true;
 
semaphore_clear_deadlocks(dev_priv);
 
seqno = ring->get_seqno(ring, false);
acthd = intel_ring_get_active_head(ring);
 
if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
// if (waitqueue_active(&ring->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
// DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
// ring->name);
// wake_up_all(&ring->irq_queue);
// ring->hangcheck.score += HUNG;
// } else
busy = false;
} else {
/* We always increment the hangcheck score
* if the ring is busy and still processing
* the same request, so that no single request
* can run indefinitely (such as a chain of
* batches). The only time we do not increment
* the hangcheck score on this ring is if this
* ring is in a legitimate wait for another
* ring. In that case the waiting ring is a
* victim and we want to be sure we catch the
* right culprit. Then every time we do kick
* the ring, we add a small increment to the
* score so that we can catch a batch that is
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
ring->hangcheck.action = ring_stuck(ring,
acthd);
 
switch (ring->hangcheck.action) {
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
ring->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
ring->hangcheck.score += KICK;
break;
case HANGCHECK_HUNG:
ring->hangcheck.score += HUNG;
stuck[i] = true;
break;
}
}
} else {
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
}
 
ring->hangcheck.seqno = seqno;
ring->hangcheck.acthd = acthd;
busy_count += busy;
}
 
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score > FIRE) {
DRM_INFO("%s on %s\n",
stuck[i] ? "stuck" : "no progress",
ring->name);
rings_hung++;
}
}
 
// if (rings_hung)
// return i915_handle_error(dev, true);
 
}
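
/*
 * A minimal sketch, not driver code: the scoring arithmetic above in
 * numbers. With BUSY=1, KICK=5, HUNG=20 and FIRE=30 as defined in
 * i915_hangcheck_elapsed(), a ring flagged HANGCHECK_HUNG on two
 * consecutive hangcheck periods reaches 40 > FIRE and is reported hung,
 * while a ring that only needs kicking takes seven periods (7 * 5 = 35).
 */
#if 0
static int hangcheck_periods_until_fire_sketch(int per_period_increment)
{
	int score = 0, periods = 0;

	while (score <= FIRE) {			/* FIRE == 30, defined above */
		score += per_period_increment;	/* HUNG, KICK or BUSY */
		periods++;
	}
	return periods;
}
#endif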
 
static void ibx_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_NOP(dev))
return;
 
1927,6 → 2123,42
POSTING_READ(SDEIER);
}
 
static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
 
if (INTEL_INFO(dev)->gen >= 6) {
/* and PM */
I915_WRITE(GEN6_PMIMR, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0x0);
POSTING_READ(GEN6_PMIER);
}
}
 
/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
atomic_set(&dev_priv->irq_received, 0);
 
I915_WRITE(HWSTAM, 0xeffe);
 
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
POSTING_READ(DEIER);
 
gen5_gt_irq_preinstall(dev);
 
ibx_irq_preinstall(dev);
}
 
static void valleyview_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1943,10 → 2175,9
/* and GT */
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
 
gen5_gt_irq_preinstall(dev);
 
I915_WRITE(DPINVGTT, 0xff);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
1964,22 → 2195,21
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 mask = ~I915_READ(SDEIMR);
u32 hotplug;
u32 hotplug_irqs, hotplug, enabled_irqs = 0;
 
if (HAS_PCH_IBX(dev)) {
mask &= ~SDE_HOTPLUG_MASK;
hotplug_irqs = SDE_HOTPLUG_MASK;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
mask |= hpd_ibx[intel_encoder->hpd_pin];
enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
} else {
mask &= ~SDE_HOTPLUG_MASK_CPT;
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
mask |= hpd_cpt[intel_encoder->hpd_pin];
enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
 
I915_WRITE(SDEIMR, ~mask);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
2000,101 → 2230,110
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 mask;
 
if (HAS_PCH_IBX(dev))
mask = SDE_GMBUS | SDE_AUX_MASK;
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
 
if (HAS_PCH_NOP(dev))
return;
 
if (HAS_PCH_IBX(dev)) {
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
SDE_TRANSA_FIFO_UNDER | SDE_POISON;
} else {
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
 
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
 
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, ~mask);
}
 
static int ironlake_irq_postinstall(struct drm_device *dev)
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable the kinds of interrupts that are always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A;
u32 render_irqs;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pm_irqs, gt_irqs;
 
dev_priv->irq_mask = ~display_mask;
pm_irqs = gt_irqs = 0;
 
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
POSTING_READ(DEIER);
 
dev_priv->gt_irq_mask = ~0;
if (HAS_L3_GPU_CACHE(dev)) {
/* L3 parity interrupt is always unmasked. */
dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
}
 
gt_irqs |= GT_RENDER_USER_INTERRUPT;
if (IS_GEN5(dev)) {
gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
 
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
if (IS_GEN6(dev))
render_irqs =
GT_USER_INTERRUPT |
GEN6_BSD_USER_INTERRUPT |
GEN6_BLITTER_USER_INTERRUPT;
else
render_irqs =
GT_USER_INTERRUPT |
GT_PIPE_NOTIFY |
GT_BSD_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
I915_WRITE(GTIER, gt_irqs);
POSTING_READ(GTIER);
 
ibx_irq_postinstall(dev);
if (INTEL_INFO(dev)->gen >= 6) {
pm_irqs |= GEN6_PM_RPS_EVENTS;
 
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
I915_WRITE(DEIIR, DE_PCU_EVENT);
I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
}
if (HAS_VEBOX(dev))
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 
return 0;
dev_priv->pm_irq_mask = 0xffffffff;
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
I915_WRITE(GEN6_PMIER, pm_irqs);
POSTING_READ(GEN6_PMIER);
}
}
 
static int ivybridge_irq_postinstall(struct drm_device *dev)
static int ironlake_irq_postinstall(struct drm_device *dev)
{
unsigned long irqflags;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable the kinds of interrupts that are always enabled */
u32 display_mask =
DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
DE_PLANEC_FLIP_DONE_IVB |
u32 display_mask, extra_mask;
 
if (INTEL_INFO(dev)->gen >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
DE_PLANEA_FLIP_DONE_IVB |
DE_AUX_CHANNEL_A_IVB;
u32 render_irqs;
DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
DE_ERR_INT_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB);
 
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
}
 
dev_priv->irq_mask = ~display_mask;
 
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
I915_WRITE(DEIER,
display_mask |
DE_PIPEC_VBLANK_IVB |
DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB);
I915_WRITE(DEIER, display_mask | extra_mask);
POSTING_READ(DEIER);
 
dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
gen5_gt_irq_postinstall(dev);
 
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
ibx_irq_postinstall(dev);
 
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
if (IS_IRONLAKE_M(dev)) {
/* Enable PCU event interrupts
*
* spinlocking not required here for correctness since interrupt
* setup is guaranteed to run in single-threaded context. But we
* need it to make the assert_spin_locked happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
ibx_irq_postinstall(dev);
 
return 0;
}
 
2103,8 → 2342,7
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs;
u16 msid;
unsigned long irqflags;
 
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2120,13 → 2358,6
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
/* Hack for broken MSIs on VLV */
// pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
// pci_read_config_word(dev->pdev, 0x98, &msid);
// msid &= 0xff; /* mask out delivery bits */
// msid |= (1<<14);
// pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
 
2137,21 → 2368,19
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
 
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
 
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
gen5_gt_irq_postinstall(dev);
 
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
GEN6_BLITTER_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
 
/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2197,6 → 2426,8
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
I915_WRITE(DEIIR, I915_READ(DEIIR));
if (IS_GEN7(dev))
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
 
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
2208,6 → 2439,8
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
 
#if 0
2290,7 → 2523,6
u16 iir, new_iir;
u32 pipe_stats[2];
unsigned long irqflags;
int irq_received;
int pipe;
u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2309,8 → 2541,8
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
// if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
// i915_handle_error(dev, false);
 
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
2324,7 → 2556,6
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
irq_received = 1;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2485,8 → 2716,8
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
// if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
// i915_handle_error(dev, false);
 
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
2514,12 → 2745,9
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
}
 
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
POSTING_READ(PORT_HOTPLUG_STAT);
}
2615,6 → 2843,7
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 error_mask;
unsigned long irqflags;
 
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2633,7 → 2862,11
if (IS_G4X(dev))
enable_mask |= I915_BSD_USER_INTERRUPT;
 
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
/*
* Enable some error detection, note the instruction error mask
2669,6 → 2902,8
struct intel_encoder *intel_encoder;
u32 hotplug_en;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (I915_HAS_HOTPLUG(dev)) {
hotplug_en = I915_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2719,8 → 2954,8
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
// if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
// i915_handle_error(dev, false);
 
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
2749,17 → 2984,14
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
HOTPLUG_INT_STATUS_G4X :
HOTPLUG_INT_STATUS_I965);
HOTPLUG_INT_STATUS_I915);
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger,
IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
}
 
intel_hpd_irq_handler(dev, hotplug_trigger,
IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
2843,6 → 3075,7
// pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
 
// dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
 
if (IS_VALLEYVIEW(dev)) {
dev->driver->irq_handler = valleyview_irq_handler;
2849,12 → 3082,6
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
2881,6 → 3108,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
unsigned long irqflags;
int i;
 
for (i = 1; i < HPD_NUM_PINS; i++) {
2893,66 → 3121,87
if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
 
/* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked checks happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
 
irqreturn_t intel_irq_handler(struct drm_device *dev)
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
// printf("i915 irq\n");
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
 
return dev->driver->irq_handler(0, dev);
ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
ilk_disable_gt_irq(dev_priv, 0xffffffff);
snb_disable_pm_irq(dev_priv, 0xffffffff);
 
dev_priv->pc8.irqs_disabled = true;
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
int drm_irq_install(struct drm_device *dev)
/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
unsigned long sh_flags = 0;
int irq_line;
int ret = 0;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t val, expected;
 
char *irqname;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
mutex_lock(&dev->struct_mutex);
val = I915_READ(DEIMR);
expected = ~DE_PCH_EVENT_IVB;
WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
 
/* Driver must have been initialized */
if (!dev->dev_private) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
expected = ~SDE_HOTPLUG_MASK_CPT;
WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
val, expected);
 
if (dev->irq_enabled) {
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
val = I915_READ(GTIMR);
expected = 0xffffffff;
WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
 
irq_line = drm_dev_to_irq(dev);
val = I915_READ(GEN6_PMIMR);
expected = 0xffffffff;
WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
expected);
 
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
dev_priv->pc8.irqs_disabled = false;
 
/* Before installing handler */
if (dev->driver->irq_preinstall)
dev->driver->irq_preinstall(dev);
ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
ibx_enable_display_interrupt(dev_priv,
~dev_priv->pc8.regsave.sdeimr &
~SDE_HOTPLUG_MASK_CPT);
ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
 
ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
/* After installing handler */
if (dev->driver->irq_postinstall)
ret = dev->driver->irq_postinstall(dev);
 
if (ret < 0) {
DRM_ERROR(__FUNCTION__);
}
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
 
u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
cmd&= ~(1<<10);
PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);
// printf("i915 irq\n");
 
return ret;
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
 
return dev->driver->irq_handler(0, dev);
}
 
/drivers/video/drm/i915/i915_reg.h
33,21 → 33,6
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
 
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
* This is all handled in the intel-gtt.ko module. i915.ko only
* cares about the vga bit for the vga arbiter.
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
#define SNB_GMCH_CTRL 0x50
#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
#define SNB_GMCH_GGMS_MASK 0x3
#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
#define SNB_GMCH_GMS_MASK 0x1f
 
 
/* PCI config space */
 
#define HPLLCC 0xc0 /* 855 only */
61,6 → 46,12
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4)
#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4)
#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4)
#define GC_DISPLAY_CLOCK_MASK (7 << 4)
#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
147,15 → 138,9
#define VGA_MSR_MEM_EN (1<<1)
#define VGA_MSR_CGA_MODE (1<<0)
 
/*
* SR01 is the only VGA register touched on non-UMS setups.
* VLV doesn't do UMS, so the sequencer index/data registers
* are the only VGA registers which need to include
* display_mmio_offset.
*/
#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
#define VGA_SR_INDEX 0x3c4
#define SR01 1
#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)
#define VGA_SR_DATA 0x3c5
 
#define VGA_AR_INDEX 0x3c0
#define VGA_AR_VID_EN (1<<5)
245,6 → 230,7
* address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
265,13 → 251,19
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
#define MI_SEMAPHORE_REGISTER (1<<18)
#define MI_SEMAPHORE_SYNC_RV (2<<16)
#define MI_SEMAPHORE_SYNC_RB (0<<16)
#define MI_SEMAPHORE_SYNC_VR (0<<16)
#define MI_SEMAPHORE_SYNC_VB (2<<16)
#define MI_SEMAPHORE_SYNC_BR (2<<16)
#define MI_SEMAPHORE_SYNC_BV (0<<16)
#define MI_SEMAPHORE_SYNC_INVALID (1<<0)
#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
/*
* 3D instructions used by the kernel
*/
342,25 → 334,55
#define DEBUG_RESET_DISPLAY (1<<9)
 
/*
* DPIO - a special bus for various display related registers to hide behind:
* 0x800c: m1, m2, n, p1, p2, k dividers
* 0x8014: REF and SFR select
* 0x8014: N divider, VCO select
* 0x801c/3c: core clock bits
* 0x8048/68: low pass filter coefficients
* 0x8100: fast clock controls
* IOSF sideband
*/
#define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100)
#define IOSF_DEVFN_SHIFT 24
#define IOSF_OPCODE_SHIFT 16
#define IOSF_PORT_SHIFT 8
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
#define IOSF_SB_BUSY (1<<0)
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
 
#define PUNIT_OPCODE_REG_READ 6
#define PUNIT_OPCODE_REG_WRITE 7
 
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
 
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
 
#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11
#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800
#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34
#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007
#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
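
/*
 * A minimal sketch, not part of this header: how the fields above combine
 * into one doorbell request for a sideband register read. The locking and
 * bounded busy-wait of the real driver are omitted; only the bit layout
 * implied by the *_SHIFT defines is shown, and the devfn/bar values of 0
 * are assumptions for illustration.
 */
#if 0
static u32 vlv_sideband_read_sketch(struct drm_i915_private *dev_priv,
				    u32 port, u32 addr)
{
	u32 cmd = (0 << IOSF_DEVFN_SHIFT) |
		  (PUNIT_OPCODE_REG_READ << IOSF_OPCODE_SHIFT) |
		  (port << IOSF_PORT_SHIFT) |
		  (0xf << IOSF_BYTE_ENABLES_SHIFT) |
		  (0 << IOSF_BAR_SHIFT) |
		  IOSF_SB_BUSY;

	I915_WRITE(VLV_IOSF_ADDR, addr);
	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);

	while (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY)
		;	/* the real code bounds this wait */

	return I915_READ(VLV_IOSF_DATA);
}
#endif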
 
/*
* DPIO - a special bus for various display related registers to hide behind
*
* DPIO is VLV only.
*
* Note: digital port B is DDI0, digital port C is DDI1
*/
#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100)
#define DPIO_RID (0<<24)
#define DPIO_OP_WRITE (1<<16)
#define DPIO_OP_READ (0<<16)
#define DPIO_PORTID (0x12<<8)
#define DPIO_BYTE (0xf<<4)
#define DPIO_BUSY (1<<0) /* status only */
#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104)
#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108)
#define DPIO_DEVFN 0
#define DPIO_OPCODE_REG_WRITE 1
#define DPIO_OPCODE_REG_READ 0
 
#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
367,8 → 389,20
#define DPIO_SFR_BYPASS (1<<1)
#define DPIO_RESET (1<<0)
 
#define _DPIO_TX3_SWING_CTL4_A 0x690
#define _DPIO_TX3_SWING_CTL4_B 0x2a90
#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \
_DPIO_TX3_SWING_CTL4_B)
 
/*
* Per pipe/PLL DPIO regs
*/
#define _DPIO_DIV_A 0x800c
#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
#define DPIO_POST_DIV_DAC 0
#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
#define DPIO_POST_DIV_LVDS1 2
#define DPIO_POST_DIV_LVDS2 3
#define DPIO_K_SHIFT (24) /* 4 bits */
#define DPIO_P1_SHIFT (21) /* 3 bits */
#define DPIO_P2_SHIFT (16) /* 5 bits */
394,12 → 428,109
#define _DPIO_CORE_CLK_B 0x803c
#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
 
#define _DPIO_LFP_COEFF_A 0x8048
#define _DPIO_LFP_COEFF_B 0x8068
#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
#define _DPIO_IREF_CTL_A 0x8040
#define _DPIO_IREF_CTL_B 0x8060
#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
 
#define DPIO_IREF_BCAST 0xc044
#define _DPIO_IREF_A 0x8044
#define _DPIO_IREF_B 0x8064
#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
 
#define _DPIO_PLL_CML_A 0x804c
#define _DPIO_PLL_CML_B 0x806c
#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
 
#define _DPIO_LPF_COEFF_A 0x8048
#define _DPIO_LPF_COEFF_B 0x8068
#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
 
#define DPIO_CALIBRATION 0x80ac
 
#define DPIO_FASTCLK_DISABLE 0x8100
 
/*
* Per DDI channel DPIO regs
*/
 
#define _DPIO_PCS_TX_0 0x8200
#define _DPIO_PCS_TX_1 0x8400
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
 
#define _DPIO_PCS_CLK_0 0x8204
#define _DPIO_PCS_CLK_1 0x8404
#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
 
#define _DPIO_PCS_CTL_OVR1_A 0x8224
#define _DPIO_PCS_CTL_OVR1_B 0x8424
#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
_DPIO_PCS_CTL_OVR1_B)
 
#define _DPIO_PCS_STAGGER0_A 0x822c
#define _DPIO_PCS_STAGGER0_B 0x842c
#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
_DPIO_PCS_STAGGER0_B)
 
#define _DPIO_PCS_STAGGER1_A 0x8230
#define _DPIO_PCS_STAGGER1_B 0x8430
#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
_DPIO_PCS_STAGGER1_B)
 
#define _DPIO_PCS_CLOCKBUF0_A 0x8238
#define _DPIO_PCS_CLOCKBUF0_B 0x8438
#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
_DPIO_PCS_CLOCKBUF0_B)
 
#define _DPIO_PCS_CLOCKBUF8_A 0x825c
#define _DPIO_PCS_CLOCKBUF8_B 0x845c
#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
_DPIO_PCS_CLOCKBUF8_B)
 
#define _DPIO_TX_SWING_CTL2_A 0x8288
#define _DPIO_TX_SWING_CTL2_B 0x8488
#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
_DPIO_TX_SWING_CTL2_B)
 
#define _DPIO_TX_SWING_CTL3_A 0x828c
#define _DPIO_TX_SWING_CTL3_B 0x848c
#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
_DPIO_TX_SWING_CTL3_B)
 
#define _DPIO_TX_SWING_CTL4_A 0x8290
#define _DPIO_TX_SWING_CTL4_B 0x8490
#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
_DPIO_TX_SWING_CTL4_B)
 
#define _DPIO_TX_OCALINIT_0 0x8294
#define _DPIO_TX_OCALINIT_1 0x8494
#define DPIO_TX_OCALINIT_EN (1<<31)
#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
_DPIO_TX_OCALINIT_1)
 
#define _DPIO_TX_CTL_0 0x82ac
#define _DPIO_TX_CTL_1 0x84ac
#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
 
#define _DPIO_TX_LANE_0 0x82b8
#define _DPIO_TX_LANE_1 0x84b8
#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
 
#define _DPIO_DATA_CHANNEL1 0x8220
#define _DPIO_DATA_CHANNEL2 0x8420
#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
 
#define _DPIO_PORT0_PCS0 0x0220
#define _DPIO_PORT0_PCS1 0x0420
#define _DPIO_PORT1_PCS2 0x2620
#define _DPIO_PORT1_PCS3 0x2820
#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
#define DPIO_DATA_CHANNEL1 0x8220
#define DPIO_DATA_CHANNEL2 0x8420
 
443,6 → 574,7
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
#define VEBOX_RING_BASE 0x1a000
#define BLT_RING_BASE 0x22000
#define RING_TAIL(base) ((base)+0x30)
#define RING_HEAD(base) ((base)+0x34)
450,12 → 582,20
#define RING_CTL(base) ((base)+0x3c)
#define RING_SYNC_0(base) ((base)+0x40)
#define RING_SYNC_1(base) ((base)+0x44)
#define RING_SYNC_2(base) ((base)+0x48)
#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE))
#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE))
#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE))
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC 0
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
467,6 → 607,7
#define DONE_REG 0x40b0
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define VEBOX_HWS_PGA_GEN7 (0x04380)
#define RING_ACTHD(base) ((base)+0x74)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
527,13 → 668,35
 
#define ERROR_GEN6 0x040a0
#define GEN7_ERR_INT 0x44040
#define ERR_INT_POISON (1<<31)
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
 
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
 
#define DERRMR 0x44050
#define DERRMR_PIPEA_SCANLINE (1<<0)
#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
#define DERRMR_PIPEA_VBLANK (1<<3)
#define DERRMR_PIPEA_HBLANK (1<<5)
#define DERRMR_PIPEB_SCANLINE (1<<8)
#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
#define DERRMR_PIPEB_VBLANK (1<<11)
#define DERRMR_PIPEB_HBLANK (1<<13)
/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
#define DERRMR_PIPEC_SCANLINE (1<<14)
#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
#define DERRMR_PIPEC_VBLANK (1<<21)
#define DERRMR_PIPEC_HBLANK (1<<22)
 
 
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
* the enables for writing to the corresponding low bit.
583,24 → 746,7
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
#define I915_HWB_OOM_INTERRUPT (1<<13)
#define I915_SYNC_STATUS_INTERRUPT (1<<12)
#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
#define I915_DEBUG_INTERRUPT (1<<2)
#define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
#define I915_BSD_USER_INTERRUPT (1<<25)
#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0
#define EMR 0x020b4
617,6 → 763,8
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
#define INSTPM_TLB_INVALIDATE (1<<9)
#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
712,28 → 860,6
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
 
/* GEN6 interrupt control
* Note that the per-ring interrupt bits do alias with the global interrupt bits
* in GTIMR. */
#define GEN6_RENDER_HWSTAM 0x2098
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
#define GEN6_RENDER_SYNC_STATUS (1 << 2)
#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1)
#define GEN6_RENDER_USER_INTERRUPT (1 << 0)
 
#define GEN6_BLITTER_HWSTAM 0x22098
#define GEN6_BLITTER_IMR 0x220a8
#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26)
#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
 
#define GEN6_BLITTER_ECOSKPD 0x221d0
#define GEN6_BLITTER_LOCK_SHIFT 16
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
744,10 → 870,53
#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
#define GEN6_BSD_GO_INDICATOR (1 << 4)
 
#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
#define GEN6_BSD_USER_INTERRUPT (1 << 12)
/* On modern GEN architectures interrupt control consists of two sets
* of registers. The first set pertains to the ring generating the
* interrupt. The second set is for the functional block generating the
* interrupt. These are PM, GT, DE, etc.
*
* Luckily *knocks on wood* all the ring interrupt bits match up with the
* GT interrupt bits, so we don't need to duplicate the defines.
*
* These defines should cover us well from SNB through HSW; with minor
* exceptions they also work on ILK.
*/
#define GT_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
#define GT_BLT_CS_ERROR_INTERRUPT (1 << 25)
#define GT_BLT_USER_INTERRUPT (1 << 22)
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
#define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2)
#define GT_RENDER_DEBUG_INTERRUPT (1 << 1)
#define GT_RENDER_USER_INTERRUPT (1 << 0)
 
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
 
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
#define I915_HWB_OOM_INTERRUPT (1<<13)
#define I915_SYNC_STATUS_INTERRUPT (1<<12)
#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
#define I915_DEBUG_INTERRUPT (1<<2)
#define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
#define I915_BSD_USER_INTERRUPT (1 << 25)
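
/*
 * A minimal sketch, not part of this header: using the shared GT bits above
 * against the GTIMR/GTIER pair, mirroring the gen5_gt_irq_postinstall()
 * sequence in i915_irq.c. The irq_lock held by the real driver is assumed
 * and omitted here.
 */
#if 0
static void gt_user_irq_enable_sketch(struct drm_i915_private *dev_priv)
{
	/* Ack anything stale, then unmask and enable the render user irq. */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	dev_priv->gt_irq_mask &= ~GT_RENDER_USER_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, I915_READ(GTIER) | GT_RENDER_USER_INTERRUPT);
	POSTING_READ(GTIER);
}
#endif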
 
#define GEN6_BSD_RNCID 0x12198
 
#define GEN7_FF_THREAD_MODE 0x20a0
807,7 → 976,9
#define DPFC_CTL_EN (1<<31)
#define DPFC_CTL_PLANEA (0<<30)
#define DPFC_CTL_PLANEB (1<<30)
#define IVB_DPFC_CTL_PLANE_SHIFT (29)
#define DPFC_CTL_FENCE_EN (1<<29)
#define IVB_DPFC_CTL_FENCE_EN (1<<28)
#define DPFC_CTL_PERSISTENT_MODE (1<<25)
#define DPFC_SR_EN (1<<10)
#define DPFC_CTL_LIMIT_1X (0<<6)
840,6 → 1011,7
#define ILK_DPFC_CHICKEN 0x43224
#define ILK_FBC_RT_BASE 0x2128
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
 
#define ILK_DISPLAY_CHICKEN1 0x42000
#define ILK_FBCQ_DIS (1<<22)
855,7 → 1027,26
#define SNB_CPU_FENCE_ENABLE (1<<29)
#define DPFC_CPU_FENCE_OFFSET 0x100104
 
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE 0x7020
 
#define IPS_CTL 0x43408
#define IPS_ENABLE (1 << 31)
 
#define MSG_FBC_REND_STATE 0x50380
#define FBC_REND_NUKE (1<<2)
#define FBC_REND_CACHE_CLEAN (1<<1)
 
#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
#define HSW_BYPASS_FBC_QUEUE (1<<22)
#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \
_HSW_PIPE_SLICE_CHICKEN_1_A, + \
_HSW_PIPE_SLICE_CHICKEN_1_B)
 
#define HSW_CLKGATE_DISABLE_PART_1 0x46500
#define HSW_DPFC_GATING_DISABLE (1<<23)
 
/*
* GPIO regs
*/
947,7 → 1138,8
#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_SDVO_HIGH_SPEED (1 << 30)
#define DPLL_DVO_2X_MODE (1 << 30)
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
963,7 → 1155,10
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
 
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/*
1073,7 → 1268,7
#define DSTATE_PLL_D3_OFF (1<<3)
#define DSTATE_GFX_CLOCK_GATING (1<<1)
#define DSTATE_DOT_CLOCK_GATING (1<<0)
#define DSPCLK_GATE_D 0x6200
#define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
1186,6 → 1381,8
#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
#define FW_CSPWRDWNEN (1<<15)
 
#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
 
/*
* Palette regs
*/
1255,6 → 1452,8
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
 
#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c)
 
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
1511,6 → 1710,19
*/
#define CCID 0x2180
#define CCID_EN (1<<0)
/*
* Notes on SNB/IVB/VLV context size:
* - Power context is saved elsewhere (LLC or stolen)
* - Ring/execlist context is saved on SNB, not on IVB
* - Extended context size already includes render context size
* - We always need to follow the extended context size.
* SNB BSpec has comments indicating that we should use the
* render context size instead if execlists are disabled, but
* based on empirical testing that's just nonsense.
* - Pipelined/VF state is saved on SNB/IVB respectively
* - GT1 size just indicates how much of render context
* doesn't need saving on GT1
*/
#define CXT_SIZE 0x21a0
#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
1517,9 → 1729,7
#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
GEN6_CXT_RING_SIZE(cxt_reg) + \
GEN6_CXT_RENDER_SIZE(cxt_reg) + \
#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
GEN6_CXT_PIPELINE_SIZE(cxt_reg))
#define GEN7_CXT_SIZE 0x21a8
1529,21 → 1739,16
#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
GEN7_CXT_RING_SIZE(ctx_reg) + \
GEN7_CXT_RENDER_SIZE(ctx_reg) + \
GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_GT1_SIZE(ctx_reg) + \
#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
HSW_CXT_RING_SIZE(ctx_reg) + \
HSW_CXT_RENDER_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
/* Haswell does have the CXT_SIZE register; however, it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
* size is 70720 bytes, however, the power context and execlist context will
* never be saved (power context is stored elsewhere, and execlists don't work
* on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
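
/*
 * A minimal sketch, not the driver's actual helper: turning the context
 * size fields above into an allocation size in bytes.  It assumes each
 * field counts 64-byte units and that ALIGN()/PAGE_SIZE are in scope;
 * the helper name is illustrative only.  Haswell skips the register and
 * simply uses the fixed HSW_CXT_TOTAL_SIZE of 17 pages.
 */
static inline unsigned long example_gen7_ctx_bytes(u32 gen7_cxt_size)
{
	/* extended + VF state (see GEN7_CXT_TOTAL_SIZE), rounded to whole pages */
	return ALIGN(GEN7_CXT_TOTAL_SIZE(gen7_cxt_size) * 64, PAGE_SIZE);
}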
 
 
/*
* Overlay regs
*/
1594,6 → 1799,71
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
/* HSW eDP PSR registers */
#define EDP_PSR_CTL 0x64800
#define EDP_PSR_ENABLE (1<<31)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
#define EDP_PSR_TP1_TP2_SEL (0<<11)
#define EDP_PSR_TP1_TP3_SEL (1<<11)
#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
#define EDP_PSR_TP1_TIME_500us (0<<4)
#define EDP_PSR_TP1_TIME_100us (1<<4)
#define EDP_PSR_TP1_TIME_2500us (2<<4)
#define EDP_PSR_TP1_TIME_0us (3<<4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
 
#define EDP_PSR_AUX_CTL 0x64810
#define EDP_PSR_AUX_DATA1 0x64814
#define EDP_PSR_DPCD_COMMAND 0x80060000
#define EDP_PSR_AUX_DATA2 0x64818
#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
#define EDP_PSR_AUX_DATA3 0x6481c
#define EDP_PSR_AUX_DATA4 0x64820
#define EDP_PSR_AUX_DATA5 0x64824
 
#define EDP_PSR_STATUS_CTL 0x64840
#define EDP_PSR_STATUS_STATE_MASK (7<<29)
#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
#define EDP_PSR_STATUS_LINK_MASK (3<<26)
#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
#define EDP_PSR_STATUS_COUNT_SHIFT 16
#define EDP_PSR_STATUS_COUNT_MASK 0xf
#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
 
#define EDP_PSR_PERF_CNT 0x64844
#define EDP_PSR_PERF_CNT_MASK 0xffffff
 
#define EDP_PSR_DEBUG_CTL 0x64860
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
 
/* VGA port control */
#define ADPA 0x61100
#define PCH_ADPA 0xe1100
1674,10 → 1944,16
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
 
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
/* HDMI/DP bits are gen4+ */
#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
/*
* HDMI/DP bits are gen4+
*
* WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
* Please check the detailed lore in the commit message for experimental
* evidence.
*/
#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
1691,6 → 1967,12
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
/*
* Bspec seems to be seriously misleading about the SDVO hpd bits on i965g/gm,
* since reality corroborates that they're the same as on gen3. But keep these
* bits here (and the comment!) to help any other lost wanderers back onto the
* right tracks.
*/
#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
1702,13 → 1984,6
PORTC_HOTPLUG_INT_STATUS | \
PORTD_HOTPLUG_INT_STATUS)
 
#define HOTPLUG_INT_STATUS_I965 (CRT_HOTPLUG_INT_STATUS | \
SDVOB_HOTPLUG_INT_STATUS_I965 | \
SDVOC_HOTPLUG_INT_STATUS_I965 | \
PORTB_HOTPLUG_INT_STATUS | \
PORTC_HOTPLUG_INT_STATUS | \
PORTD_HOTPLUG_INT_STATUS)
 
#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
SDVOB_HOTPLUG_INT_STATUS_I915 | \
SDVOC_HOTPLUG_INT_STATUS_I915 | \
1864,6 → 2139,7
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
1967,6 → 2243,10
#define BLM_PIPE_A (0 << 29)
#define BLM_PIPE_B (1 << 29)
#define BLM_PIPE_C (2 << 29) /* ivb + */
#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
#define BLM_TRANSCODER_B BLM_PIPE_B
#define BLM_TRANSCODER_C BLM_PIPE_C
#define BLM_TRANSCODER_EDP (3 << 29)
#define BLM_PIPE(pipe) ((pipe) << 29)
#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
2007,6 → 2287,8
#define BLC_PWM_CPU_CTL2 0x48250
#define BLC_PWM_CPU_CTL 0x48254
 
#define HSW_BLC_PWM2_CTL 0x48350
 
/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
* like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
#define BLC_PWM_PCH_CTL1 0xc8250
2015,6 → 2297,12
#define BLM_PCH_POLARITY (1 << 29)
#define BLC_PWM_PCH_CTL2 0xc8254
 
#define UTIL_PIN_CTL 0x48400
#define UTIL_PIN_ENABLE (1 << 31)
 
#define PCH_GTC_CTL 0xe7000
#define PCH_GTC_ENABLE (1 << 31)
 
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
2540,9 → 2828,7
#define DP_PRE_EMPHASIS_SHIFT 22
 
/* How many wires to use. I guess 3 was too hard */
#define DP_PORT_WIDTH_1 (0 << 19)
#define DP_PORT_WIDTH_2 (1 << 19)
#define DP_PORT_WIDTH_4 (3 << 19)
#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
#define DP_PORT_WIDTH_MASK (7 << 19)
 
/* Mystic DPCD version 1.1 special mode */
2646,18 → 2932,20
* which is after the LUTs, so we want the bytes for our color format.
* For our current usage, this is always 3, one byte for R, G and B.
*/
#define _PIPEA_GMCH_DATA_M 0x70050
#define _PIPEB_GMCH_DATA_M 0x71050
#define _PIPEA_DATA_M_G4X 0x70050
#define _PIPEB_DATA_M_G4X 0x71050
 
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_SHIFT 25
#define TU_SIZE_MASK (0x3f << 25)
 
#define DATA_LINK_M_N_MASK (0xffffff)
#define DATA_LINK_N_MAX (0x800000)
 
#define _PIPEA_GMCH_DATA_N 0x70054
#define _PIPEB_GMCH_DATA_N 0x71054
#define _PIPEA_DATA_N_G4X 0x70054
#define _PIPEB_DATA_N_G4X 0x71054
#define PIPE_GMCH_DATA_N_MASK (0xffffff)
 
/*
* Computing Link M and N values for the Display Port link
2670,16 → 2958,18
* Attributes and VB-ID.
*/
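
/*
 * A minimal sketch, not the driver's M/N helper: the data/link M and N
 * fields program a fractional ratio (for the data pair, roughly payload
 * bandwidth over link bandwidth).  The helper name, the fixed choice of
 * N and the use of div_u64()/WARN_ON() are illustrative assumptions; the
 * real code rounds N more carefully.
 */
static inline void example_compute_m_n(u32 num, u32 den, u32 *ret_m, u32 *ret_n)
{
	*ret_n = DATA_LINK_N_MAX;			/* largest legal N (0x800000) */
	*ret_m = div_u64((u64)num * *ret_n, den);	/* keep M/N == num/den */
	WARN_ON(*ret_m & ~DATA_LINK_M_N_MASK);		/* M must fit in 24 bits */
}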
 
#define _PIPEA_DP_LINK_M 0x70060
#define _PIPEB_DP_LINK_M 0x71060
#define _PIPEA_LINK_M_G4X 0x70060
#define _PIPEB_LINK_M_G4X 0x71060
#define PIPEA_DP_LINK_M_MASK (0xffffff)
 
#define _PIPEA_DP_LINK_N 0x70064
#define _PIPEB_DP_LINK_N 0x71064
#define _PIPEA_LINK_N_G4X 0x70064
#define _PIPEB_LINK_N_G4X 0x71064
#define PIPEA_DP_LINK_N_MASK (0xffffff)
 
#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
 
/* Display & cursor control */
 
2715,6 → 3005,7
#define PIPECONF_INTERLACED_ILK (3 << 21)
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
2915,6 → 3206,10
#define WM3S_LP_IVB 0x45128
#define WM1S_LP_EN (1<<31)
 
#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
(WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
 
/* Memory latency timer register */
#define MLTR_ILK 0x11222
#define MLTR_WM1_SHIFT 0
2921,9 → 3216,6
#define MLTR_WM2_SHIFT 8
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
 
/* define the fifo size on Ironlake */
#define ILK_DISPLAY_FIFO 128
2970,12 → 3262,6
#define SSKPD_WM2_SHIFT 16
#define SSKPD_WM3_SHIFT 24
 
#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
 
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
3027,6 → 3313,7
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
3294,7 → 3581,7
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
#define _SPACNTR 0x72180
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
#define SP_ENABLE (1<<31)
#define SP_GEAMMA_ENABLE (1<<30)
#define SP_PIXFORMAT_MASK (0xf<<26)
3313,30 → 3600,30
#define SP_YUV_ORDER_YVYU (2<<16)
#define SP_YUV_ORDER_VYUY (3<<16)
#define SP_TILED (1<<10)
#define _SPALINOFF 0x72184
#define _SPASTRIDE 0x72188
#define _SPAPOS 0x7218c
#define _SPASIZE 0x72190
#define _SPAKEYMINVAL 0x72194
#define _SPAKEYMSK 0x72198
#define _SPASURF 0x7219c
#define _SPAKEYMAXVAL 0x721a0
#define _SPATILEOFF 0x721a4
#define _SPACONSTALPHA 0x721a8
#define _SPAGAMC 0x721f4
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
 
#define _SPBCNTR 0x72280
#define _SPBLINOFF 0x72284
#define _SPBSTRIDE 0x72288
#define _SPBPOS 0x7228c
#define _SPBSIZE 0x72290
#define _SPBKEYMINVAL 0x72294
#define _SPBKEYMSK 0x72298
#define _SPBSURF 0x7229c
#define _SPBKEYMAXVAL 0x722a0
#define _SPBTILEOFF 0x722a4
#define _SPBCONSTALPHA 0x722a8
#define _SPBGAMC 0x722f4
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
 
#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
3474,6 → 3761,15
#define _LGC_PALETTE_B 0x4a800
#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
 
#define _GAMMA_MODE_A 0x4a480
#define _GAMMA_MODE_B 0x4ac80
#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
#define GAMMA_MODE_MODE_MASK (3 << 0)
#define GAMMA_MODE_MODE_8BIT (0 << 0)
#define GAMMA_MODE_MODE_10BIT (1 << 0)
#define GAMMA_MODE_MODE_12BIT (2 << 0)
#define GAMMA_MODE_MODE_SPLIT (3 << 0)
 
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
#define DE_SPRITEB_FLIP_DONE (1 << 29)
3502,7 → 3798,7
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
 
/* More Ivybridge lolz */
#define DE_ERR_DEBUG_IVB (1<<30)
#define DE_ERR_INT_IVB (1<<30)
#define DE_GSE_IVB (1<<29)
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
3517,6 → 3813,9
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
 
#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
 
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1<<31)
 
3525,21 → 3824,6
#define DEIIR 0x44008
#define DEIER 0x4400c
 
/* GT interrupt.
* Note that for gen6+ the ring-specific interrupt bits do alias with the
* corresponding bits in the per-ring interrupt control registers. */
#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
#define GT_PIPE_NOTIFY (1 << 4)
#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
 
#define GTISR 0x44010
#define GTIMR 0x44014
#define GTIIR 0x44018
3569,6 → 3853,9
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
 
#define CHICKEN_PAR1_1 0x42080
#define FORCE_ARB_IDLE_PLANES (1 << 14)
 
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
3594,6 → 3881,9
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
 
#define HSW_SCRATCH1 0xb038
#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
 
#define HSW_FUSE_STRAP 0x42014
#define HSW_CDCLK_LIMIT (1 << 24)
 
3661,6 → 3951,7
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
#define SDE_GMBUS_CPT (1 << 17)
#define SDE_ERROR_CPT (1 << 16)
#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
#define SDE_FDI_RXC_CPT (1 << 8)
3685,6 → 3976,13
#define SDEIIR 0xc4008
#define SDEIER 0xc400c
 
#define SERR_INT 0xc4040
#define SERR_INT_POISON (1<<31)
#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
 
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define PORTD_HOTPLUG_ENABLE (1 << 20)
3734,7 → 4032,7
 
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
 
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
3741,8 → 4039,8
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
#define PCH_DPLL_TEST 0xc606c
 
3782,46 → 4080,40
#define PCH_SSC4_AUX_PARMS 0xc6214
 
#define PCH_DPLL_SEL 0xc7000
#define TRANSA_DPLL_ENABLE (1<<3)
#define TRANSA_DPLLB_SEL (1<<0)
#define TRANSA_DPLLA_SEL 0
#define TRANSB_DPLL_ENABLE (1<<7)
#define TRANSB_DPLLB_SEL (1<<4)
#define TRANSB_DPLLA_SEL (0)
#define TRANSC_DPLL_ENABLE (1<<11)
#define TRANSC_DPLLB_SEL (1<<8)
#define TRANSC_DPLLA_SEL (0)
#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4))
#define TRANS_DPLLA_SEL(pipe) 0
#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3))
 
/* transcoder */
 
#define _TRANS_HTOTAL_A 0xe0000
#define _PCH_TRANS_HTOTAL_A 0xe0000
#define TRANS_HTOTAL_SHIFT 16
#define TRANS_HACTIVE_SHIFT 0
#define _TRANS_HBLANK_A 0xe0004
#define _PCH_TRANS_HBLANK_A 0xe0004
#define TRANS_HBLANK_END_SHIFT 16
#define TRANS_HBLANK_START_SHIFT 0
#define _TRANS_HSYNC_A 0xe0008
#define _PCH_TRANS_HSYNC_A 0xe0008
#define TRANS_HSYNC_END_SHIFT 16
#define TRANS_HSYNC_START_SHIFT 0
#define _TRANS_VTOTAL_A 0xe000c
#define _PCH_TRANS_VTOTAL_A 0xe000c
#define TRANS_VTOTAL_SHIFT 16
#define TRANS_VACTIVE_SHIFT 0
#define _TRANS_VBLANK_A 0xe0010
#define _PCH_TRANS_VBLANK_A 0xe0010
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
#define _TRANS_VSYNC_A 0xe0014
#define _PCH_TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define _TRANS_VSYNCSHIFT_A 0xe0028
#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
 
#define _TRANSA_DATA_M1 0xe0030
#define _TRANSA_DATA_N1 0xe0034
#define _TRANSA_DATA_M2 0xe0038
#define _TRANSA_DATA_N2 0xe003c
#define _TRANSA_DP_LINK_M1 0xe0040
#define _TRANSA_DP_LINK_N1 0xe0044
#define _TRANSA_DP_LINK_M2 0xe0048
#define _TRANSA_DP_LINK_N2 0xe004c
#define _PCH_TRANSA_DATA_M1 0xe0030
#define _PCH_TRANSA_DATA_N1 0xe0034
#define _PCH_TRANSA_DATA_M2 0xe0038
#define _PCH_TRANSA_DATA_N2 0xe003c
#define _PCH_TRANSA_LINK_M1 0xe0040
#define _PCH_TRANSA_LINK_N1 0xe0044
#define _PCH_TRANSA_LINK_M2 0xe0048
#define _PCH_TRANSA_LINK_N2 0xe004c
 
/* Per-transcoder DIP controls */
 
3883,6 → 4175,8
_TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
#define HSW_TVIDEO_DIP_VS_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
#define HSW_TVIDEO_DIP_GCP(trans) \
3890,44 → 4184,52
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
 
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
#define _TRANS_VTOTAL_B 0xe100c
#define _TRANS_VBLANK_B 0xe1010
#define _TRANS_VSYNC_B 0xe1014
#define _TRANS_VSYNCSHIFT_B 0xe1028
#define HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1<<31)
#define HSW_STEREO_3D_CTL_B 0x71020
 
#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
_TRANS_VSYNCSHIFT_B)
#define HSW_STEREO_3D_CTL(trans) \
_TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
 
#define _TRANSB_DATA_M1 0xe1030
#define _TRANSB_DATA_N1 0xe1034
#define _TRANSB_DATA_M2 0xe1038
#define _TRANSB_DATA_N2 0xe103c
#define _TRANSB_DP_LINK_M1 0xe1040
#define _TRANSB_DP_LINK_N1 0xe1044
#define _TRANSB_DP_LINK_M2 0xe1048
#define _TRANSB_DP_LINK_N2 0xe104c
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
#define _PCH_TRANS_HSYNC_B 0xe1008
#define _PCH_TRANS_VTOTAL_B 0xe100c
#define _PCH_TRANS_VBLANK_B 0xe1010
#define _PCH_TRANS_VSYNC_B 0xe1014
#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
 
#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
_PCH_TRANS_VSYNCSHIFT_B)
 
#define _TRANSACONF 0xf0008
#define _TRANSBCONF 0xf1008
#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
#define _PCH_TRANSB_DATA_M1 0xe1030
#define _PCH_TRANSB_DATA_N1 0xe1034
#define _PCH_TRANSB_DATA_M2 0xe1038
#define _PCH_TRANSB_DATA_N2 0xe103c
#define _PCH_TRANSB_LINK_M1 0xe1040
#define _PCH_TRANSB_LINK_N1 0xe1044
#define _PCH_TRANSB_LINK_M2 0xe1048
#define _PCH_TRANSB_LINK_N2 0xe104c
 
#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
 
#define _PCH_TRANSACONF 0xf0008
#define _PCH_TRANSBCONF 0xf1008
#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
#define LPT_TRANSCONF _PCH_TRANSACONF /* lpt has only one transcoder */
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
4011,10 → 4313,9
#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
#define FDI_DP_PORT_WIDTH_X1 (0<<19)
#define FDI_DP_PORT_WIDTH_X2 (1<<19)
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
#define FDI_DP_PORT_WIDTH_X4 (3<<19)
#define FDI_DP_PORT_WIDTH_SHIFT 19
#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
/* Ironlake: hardwired to 1 */
#define FDI_TX_PLL_ENABLE (1<<14)
4039,7 → 4340,6
/* train, dp width same as FDI_TX */
#define FDI_FS_ERRC_ENABLE (1<<27)
#define FDI_FE_ERRC_ENABLE (1<<26)
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
4061,9 → 4361,6
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
/* LPT */
#define FDI_PORT_WIDTH_2X_LPT (1<<19)
#define FDI_PORT_WIDTH_1X_LPT (0<<19)
 
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
4246,7 → 4543,7
#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
 
/* legacy values */
#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
4282,6 → 4579,10
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
 
#define HSW_IDICR 0x9008
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
#define HSW_EDRAM_PRESENT 0x120010
 
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
4309,6 → 4610,7
#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
#define GEN7_RC_CTL_TO_MODE (1<<28)
#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
#define GEN6_RC_CTL_HW_ENABLE (1<<31)
#define GEN6_RP_DOWN_TIMEOUT 0xA010
4370,7 → 4672,7
#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
 
4392,20 → 4694,6
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
 
#define VLV_IOSF_DOORBELL_REQ 0x182100
#define IOSF_DEVFN_SHIFT 24
#define IOSF_OPCODE_SHIFT 16
#define IOSF_PORT_SHIFT 8
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
#define IOSF_SB_BUSY (1<<0)
#define IOSF_PORT_PUNIT 0x4
#define VLV_IOSF_DATA 0x182104
#define VLV_IOSF_ADDR 0x182108
 
#define PUNIT_OPCODE_REG_READ 6
#define PUNIT_OPCODE_REG_WRITE 7
 
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
#define GEN6_RCn_MASK 7
4443,6 → 4731,9
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
 
#define HSW_ROW_CHICKEN3 0xe49c
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
 
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
4563,8 → 4854,8
#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
#define HSW_PWR_WELL_ENABLE (1<<31)
#define HSW_PWR_WELL_STATE (1<<30)
#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
4602,9 → 4893,6
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
#define TRANS_DDI_BFI_ENABLE (1<<4)
#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
 
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
4648,9 → 4936,7
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
 
/* DDI Buffer Translations */
4690,7 → 4976,8
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
#define SBI_DBUFF0_ENABLE (1<<0)
#define SBI_GEN0 0x1f00
#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
 
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
4756,8 → 5043,15
#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
#define LCPLL_POWER_DOWN_ALLOW (1<<22)
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
 
#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
#define D_COMP_COMP_FORCE (1<<8)
#define D_COMP_COMP_DISABLE (1<<0)
 
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
#define PIPE_WM_LINETIME_B 0x45274
4774,6 → 5068,9
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
 
#define WM_MISC 0x45260
#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
 
#define WM_DBG 0x45280
#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
4787,6 → 5084,9
#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
#define _PIPE_A_CSC_COEFF_BV 0x49024
#define _PIPE_A_CSC_MODE 0x49028
#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
#define CSC_MODE_YUV_TO_RGB (1 << 0)
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
#define _PIPE_A_CSC_PREOFF_LO 0x49038
4808,10 → 5108,6
#define _PIPE_B_CSC_POSTOFF_ME 0x49144
#define _PIPE_B_CSC_POSTOFF_LO 0x49148
 
#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
#define CSC_MODE_YUV_TO_RGB (1 << 0)
 
#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
/drivers/video/drm/i915/i915_trace.h
24,6 → 24,8
#define trace_i915_ring_wait_begin(a)
#define trace_i915_gem_object_pwrite(a, b, c)
#define trace_i915_gem_request_add(a, b)
#define trace_i915_gem_ring_dispatch(a, b, c);
#define trace_i915_gem_ring_dispatch(a, b, c)
#define trace_i915_vma_bind(a, b)
#define trace_i915_vma_unbind(a)
 
#endif
/drivers/video/drm/i915/intel_bios.c
211,7 → 211,7
if (!lvds_options)
return;
 
dev_priv->lvds_dither = lvds_options->pixel_dither;
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
 
225,7 → 225,7
if (!lvds_lfp_data_ptrs)
return;
 
dev_priv->lvds_vbt = 1;
dev_priv->vbt.lvds_vbt = 1;
 
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
237,7 → 237,7
 
fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
 
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
 
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
273,9 → 273,9
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
dev_priv->bios_lvds_val);
dev_priv->vbt.bios_lvds_val);
}
}
}
315,7 → 315,7
 
fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
 
dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
 
DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
344,20 → 344,20
 
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
dev_priv->int_tv_support = general->int_tv_support;
dev_priv->int_crt_support = general->int_crt_support;
dev_priv->lvds_use_ssc = general->enable_ssc;
dev_priv->lvds_ssc_freq =
dev_priv->vbt.int_tv_support = general->int_tv_support;
dev_priv->vbt.int_crt_support = general->int_crt_support;
dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
dev_priv->vbt.lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode;
dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->int_tv_support,
dev_priv->int_crt_support,
dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq,
dev_priv->display_clock_mode,
dev_priv->fdi_rx_polarity_inverted);
dev_priv->vbt.int_tv_support,
dev_priv->vbt.int_crt_support,
dev_priv->vbt.lvds_use_ssc,
dev_priv->vbt.lvds_ssc_freq,
dev_priv->vbt.display_clock_mode,
dev_priv->vbt.fdi_rx_polarity_inverted);
}
}
 
374,7 → 374,7
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_port_valid(bus_pin))
dev_priv->crt_ddc_pin = bus_pin;
dev_priv->vbt.crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
485,7 → 485,7
 
if (SUPPORTS_EDP(dev) &&
driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
dev_priv->edp.support = 1;
dev_priv->vbt.edp_support = 1;
 
if (driver->dual_frequency)
dev_priv->render_reclock_avail = true;
500,7 → 500,7
 
edp = find_section(bdb, BDB_EDP);
if (!edp) {
if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
507,13 → 507,13
 
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
dev_priv->edp.bpp = 18;
dev_priv->vbt.edp_bpp = 18;
break;
case EDP_24BPP:
dev_priv->edp.bpp = 24;
dev_priv->vbt.edp_bpp = 24;
break;
case EDP_30BPP:
dev_priv->edp.bpp = 30;
dev_priv->vbt.edp_bpp = 30;
break;
}
 
521,48 → 521,48
edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->link_params[panel_type];
 
dev_priv->edp.pps = *edp_pps;
dev_priv->vbt.edp_pps = *edp_pps;
 
dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
DP_LINK_BW_1_62;
switch (edp_link_params->lanes) {
case 0:
dev_priv->edp.lanes = 1;
dev_priv->vbt.edp_lanes = 1;
break;
case 1:
dev_priv->edp.lanes = 2;
dev_priv->vbt.edp_lanes = 2;
break;
case 3:
default:
dev_priv->edp.lanes = 4;
dev_priv->vbt.edp_lanes = 4;
break;
}
switch (edp_link_params->preemphasis) {
case 0:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
break;
case 1:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
break;
case 2:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
break;
case 3:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
break;
}
switch (edp_link_params->vswing) {
case 0:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
break;
case 1:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
break;
case 2:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
break;
case 3:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
break;
}
}
610,13 → 610,13
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
if (!dev_priv->child_dev) {
dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
if (!dev_priv->vbt.child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
}
 
dev_priv->child_dev_num = count;
dev_priv->vbt.child_dev_num = count;
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
624,7 → 624,7
/* skip the device block if device type is invalid */
continue;
}
child_dev_ptr = dev_priv->child_dev + count;
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
memcpy((void *)child_dev_ptr, (void *)p_child,
sizeof(*p_child));
637,23 → 637,23
{
struct drm_device *dev = dev_priv->dev;
 
dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
/* LFP panel data */
dev_priv->lvds_dither = 1;
dev_priv->lvds_vbt = 0;
dev_priv->vbt.lvds_dither = 1;
dev_priv->vbt.lvds_vbt = 0;
 
/* SDVO panel data */
dev_priv->sdvo_lvds_vbt_mode = NULL;
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
 
/* general features */
dev_priv->int_tv_support = 1;
dev_priv->int_crt_support = 1;
dev_priv->vbt.int_tv_support = 1;
dev_priv->vbt.int_crt_support = 1;
 
/* Default to using SSC */
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
dev_priv->vbt.lvds_use_ssc = 1;
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
}
 
 
/drivers/video/drm/i915/intel_crt.c
51,15 → 51,14
u32 adpa_reg;
};
 
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
return container_of(intel_attached_encoder(connector),
struct intel_crt, base);
return container_of(encoder, struct intel_crt, base);
}
 
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
{
return container_of(encoder, struct intel_crt, base);
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
 
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
83,6 → 82,28
return true;
}
 
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
 
tmp = I915_READ(crt->adpa_reg);
 
if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
 
if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
}
 
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
126,7 → 147,7
intel_crt_set_dpms(encoder, crt->connector->base.dpms);
}
 
 
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_crt_dpms(struct drm_connector *connector, int mode)
{
struct drm_device *dev = connector->dev;
157,6 → 178,8
else
encoder->connectors_active = true;
 
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode < old_dpms) {
/* From off to on, enable the pipe first. */
intel_crtc_update_dpms(crtc);
206,20 → 229,21
if (HAS_PCH_SPLIT(dev))
pipe_config->has_pch_encoder = true;
 
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;
 
return true;
}
 
static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_crt_mode_set(struct intel_encoder *encoder)
{
 
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crt *crt =
intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->base.dev;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
 
if (HAS_PCH_SPLIT(dev))
236,14 → 260,14
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
 
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
I915_WRITE(BCLRPAT(crtc->pipe), 0);
 
I915_WRITE(crt->adpa_reg, adpa);
}
430,7 → 454,7
 
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
edid = intel_crt_get_edid(connector, i2c);
 
if (edid) {
584,6 → 608,10
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, drm_get_connector_name(connector),
force);
 
if (I915_HAS_HOTPLUG(dev)) {
/* We cannot rely on the HPD pin always being correctly wired
* up, for example many KVMs do not pass it through, and so
636,7 → 664,7
int ret;
struct i2c_adapter *i2c;
 
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
659,7 → 687,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
 
if (HAS_PCH_SPLIT(dev)) {
if (INTEL_INFO(dev)->gen >= 5) {
u32 adpa;
 
adpa = I915_READ(crt->adpa_reg);
678,10 → 706,6
* Routines for controlling stuff on the analog port
*/
 
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_set = intel_crt_mode_set,
};
 
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
.dpms = intel_crt_dpms,
749,8 → 773,10
crt->adpa_reg = ADPA;
 
crt->base.compute_config = intel_crt_compute_config;
crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
crt->base.get_config = intel_crt_get_config;
if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev))
759,7 → 785,6
crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
drm_sysfs_connector_add(connector);
/drivers/video/drm/i915/intel_ddi.c
84,25 → 84,17
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool use_fdi_mode)
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
const u32 *ddi_translations = ((use_fdi_mode) ?
const u32 *ddi_translations = (port == PORT_E) ?
hsw_ddi_translations_fdi :
hsw_ddi_translations_dp);
hsw_ddi_translations_dp;
 
DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
port_name(port),
use_fdi_mode ? "FDI" : "DP");
 
WARN((use_fdi_mode && (port != PORT_E)),
"Programming port %c in FDI mode, this probably will not work.\n",
port_name(port));
 
for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
118,14 → 110,8
if (!HAS_DDI(dev))
return;
 
for (port = PORT_A; port < PORT_E; port++)
intel_prepare_ddi_buffers(dev, port, false);
 
/* DDI E is the suggested one to work in FDI mode, so program is as such
* by default. It will have to be re-programmed in case a digital DP
* output will be detected on it
*/
intel_prepare_ddi_buffers(dev, PORT_E, true);
for (port = PORT_A; port <= PORT_E; port++)
intel_prepare_ddi_buffers(dev, port);
}
 
static const long hsw_ddi_buf_ctl_values[] = {
174,6 → 160,8
* mode set "sequence for CRT port" document:
* - TP1 to TP2 time with the default value
* - FDI delay to 90h
*
* WaFDIAutoLinkSetTimingOverrride:hsw
*/
I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
FDI_RX_PWRDN_LANE0_VAL(2) |
181,7 → 169,8
 
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
209,7 → 198,7
* port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((intel_crtc->fdi_lanes - 1) << 1) |
((intel_crtc->config.fdi_lanes - 1) << 1) |
hsw_ddi_buf_ctl_values[i / 2]);
POSTING_READ(DDI_BUF_CTL(PORT_E));
 
278,444 → 267,40
DRM_ERROR("FDI link training failed!\n");
}
 
/* WRPLL clock dividers */
struct wrpll_tmds_clock {
u32 clock;
u16 p; /* Post divider */
u16 n2; /* Feedback divider */
u16 r2; /* Reference divider */
};
 
/* Table of matching values for WRPLL clocks programming for each frequency.
* The code assumes this table is sorted. */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18},
{20000, 48, 32, 18},
{21000, 36, 21, 15},
{21912, 42, 29, 17},
{22000, 36, 22, 15},
{23000, 36, 23, 15},
{23500, 40, 40, 23},
{23750, 26, 16, 14},
{24000, 36, 24, 15},
{25000, 36, 25, 15},
{25175, 26, 40, 33},
{25200, 30, 21, 15},
{26000, 36, 26, 15},
{27000, 30, 21, 14},
{27027, 18, 100, 111},
{27500, 30, 29, 19},
{28000, 34, 30, 17},
{28320, 26, 30, 22},
{28322, 32, 42, 25},
{28750, 24, 23, 18},
{29000, 30, 29, 18},
{29750, 32, 30, 17},
{30000, 30, 25, 15},
{30750, 30, 41, 24},
{31000, 30, 31, 18},
{31500, 30, 28, 16},
{32000, 30, 32, 18},
{32500, 28, 32, 19},
{33000, 24, 22, 15},
{34000, 28, 30, 17},
{35000, 26, 32, 19},
{35500, 24, 30, 19},
{36000, 26, 26, 15},
{36750, 26, 46, 26},
{37000, 24, 23, 14},
{37762, 22, 40, 26},
{37800, 20, 21, 15},
{38000, 24, 27, 16},
{38250, 24, 34, 20},
{39000, 24, 26, 15},
{40000, 24, 32, 18},
{40500, 20, 21, 14},
{40541, 22, 147, 89},
{40750, 18, 19, 14},
{41000, 16, 17, 14},
{41500, 22, 44, 26},
{41540, 22, 44, 26},
{42000, 18, 21, 15},
{42500, 22, 45, 26},
{43000, 20, 43, 27},
{43163, 20, 24, 15},
{44000, 18, 22, 15},
{44900, 20, 108, 65},
{45000, 20, 25, 15},
{45250, 20, 52, 31},
{46000, 18, 23, 15},
{46750, 20, 45, 26},
{47000, 20, 40, 23},
{48000, 18, 24, 15},
{49000, 18, 49, 30},
{49500, 16, 22, 15},
{50000, 18, 25, 15},
{50500, 18, 32, 19},
{51000, 18, 34, 20},
{52000, 18, 26, 15},
{52406, 14, 34, 25},
{53000, 16, 22, 14},
{54000, 16, 24, 15},
{54054, 16, 173, 108},
{54500, 14, 24, 17},
{55000, 12, 22, 18},
{56000, 14, 45, 31},
{56250, 16, 25, 15},
{56750, 14, 25, 17},
{57000, 16, 27, 16},
{58000, 16, 43, 25},
{58250, 16, 38, 22},
{58750, 16, 40, 23},
{59000, 14, 26, 17},
{59341, 14, 40, 26},
{59400, 16, 44, 25},
{60000, 16, 32, 18},
{60500, 12, 39, 29},
{61000, 14, 49, 31},
{62000, 14, 37, 23},
{62250, 14, 42, 26},
{63000, 12, 21, 15},
{63500, 14, 28, 17},
{64000, 12, 27, 19},
{65000, 14, 32, 19},
{65250, 12, 29, 20},
{65500, 12, 32, 22},
{66000, 12, 22, 15},
{66667, 14, 38, 22},
{66750, 10, 21, 17},
{67000, 14, 33, 19},
{67750, 14, 58, 33},
{68000, 14, 30, 17},
{68179, 14, 46, 26},
{68250, 14, 46, 26},
{69000, 12, 23, 15},
{70000, 12, 28, 18},
{71000, 12, 30, 19},
{72000, 12, 24, 15},
{73000, 10, 23, 17},
{74000, 12, 23, 14},
{74176, 8, 100, 91},
{74250, 10, 22, 16},
{74481, 12, 43, 26},
{74500, 10, 29, 21},
{75000, 12, 25, 15},
{75250, 10, 39, 28},
{76000, 12, 27, 16},
{77000, 12, 53, 31},
{78000, 12, 26, 15},
{78750, 12, 28, 16},
{79000, 10, 38, 26},
{79500, 10, 28, 19},
{80000, 12, 32, 18},
{81000, 10, 21, 14},
{81081, 6, 100, 111},
{81624, 8, 29, 24},
{82000, 8, 17, 14},
{83000, 10, 40, 26},
{83950, 10, 28, 18},
{84000, 10, 28, 18},
{84750, 6, 16, 17},
{85000, 6, 17, 18},
{85250, 10, 30, 19},
{85750, 10, 27, 17},
{86000, 10, 43, 27},
{87000, 10, 29, 18},
{88000, 10, 44, 27},
{88500, 10, 41, 25},
{89000, 10, 28, 17},
{89012, 6, 90, 91},
{89100, 10, 33, 20},
{90000, 10, 25, 15},
{91000, 10, 32, 19},
{92000, 10, 46, 27},
{93000, 10, 31, 18},
{94000, 10, 40, 23},
{94500, 10, 28, 16},
{95000, 10, 44, 25},
{95654, 10, 39, 22},
{95750, 10, 39, 22},
{96000, 10, 32, 18},
{97000, 8, 23, 16},
{97750, 8, 42, 29},
{98000, 8, 45, 31},
{99000, 8, 22, 15},
{99750, 8, 34, 23},
{100000, 6, 20, 18},
{100500, 6, 19, 17},
{101000, 6, 37, 33},
{101250, 8, 21, 14},
{102000, 6, 17, 15},
{102250, 6, 25, 22},
{103000, 8, 29, 19},
{104000, 8, 37, 24},
{105000, 8, 28, 18},
{106000, 8, 22, 14},
{107000, 8, 46, 29},
{107214, 8, 27, 17},
{108000, 8, 24, 15},
{108108, 8, 173, 108},
{109000, 6, 23, 19},
{110000, 6, 22, 18},
{110013, 6, 22, 18},
{110250, 8, 49, 30},
{110500, 8, 36, 22},
{111000, 8, 23, 14},
{111264, 8, 150, 91},
{111375, 8, 33, 20},
{112000, 8, 63, 38},
{112500, 8, 25, 15},
{113100, 8, 57, 34},
{113309, 8, 42, 25},
{114000, 8, 27, 16},
{115000, 6, 23, 18},
{116000, 8, 43, 25},
{117000, 8, 26, 15},
{117500, 8, 40, 23},
{118000, 6, 38, 29},
{119000, 8, 30, 17},
{119500, 8, 46, 26},
{119651, 8, 39, 22},
{120000, 8, 32, 18},
{121000, 6, 39, 29},
{121250, 6, 31, 23},
{121750, 6, 23, 17},
{122000, 6, 42, 31},
{122614, 6, 30, 22},
{123000, 6, 41, 30},
{123379, 6, 37, 27},
{124000, 6, 51, 37},
{125000, 6, 25, 18},
{125250, 4, 13, 14},
{125750, 4, 27, 29},
{126000, 6, 21, 15},
{127000, 6, 24, 17},
{127250, 6, 41, 29},
{128000, 6, 27, 19},
{129000, 6, 43, 30},
{129859, 4, 25, 26},
{130000, 6, 26, 18},
{130250, 6, 42, 29},
{131000, 6, 32, 22},
{131500, 6, 38, 26},
{131850, 6, 41, 28},
{132000, 6, 22, 15},
{132750, 6, 28, 19},
{133000, 6, 34, 23},
{133330, 6, 37, 25},
{134000, 6, 61, 41},
{135000, 6, 21, 14},
{135250, 6, 167, 111},
{136000, 6, 62, 41},
{137000, 6, 35, 23},
{138000, 6, 23, 15},
{138500, 6, 40, 26},
{138750, 6, 37, 24},
{139000, 6, 34, 22},
{139050, 6, 34, 22},
{139054, 6, 34, 22},
{140000, 6, 28, 18},
{141000, 6, 36, 23},
{141500, 6, 22, 14},
{142000, 6, 30, 19},
{143000, 6, 27, 17},
{143472, 4, 17, 16},
{144000, 6, 24, 15},
{145000, 6, 29, 18},
{146000, 6, 47, 29},
{146250, 6, 26, 16},
{147000, 6, 49, 30},
{147891, 6, 23, 14},
{148000, 6, 23, 14},
{148250, 6, 28, 17},
{148352, 4, 100, 91},
{148500, 6, 33, 20},
{149000, 6, 48, 29},
{150000, 6, 25, 15},
{151000, 4, 19, 17},
{152000, 6, 27, 16},
{152280, 6, 44, 26},
{153000, 6, 34, 20},
{154000, 6, 53, 31},
{155000, 6, 31, 18},
{155250, 6, 50, 29},
{155750, 6, 45, 26},
{156000, 6, 26, 15},
{157000, 6, 61, 35},
{157500, 6, 28, 16},
{158000, 6, 65, 37},
{158250, 6, 44, 25},
{159000, 6, 53, 30},
{159500, 6, 39, 22},
{160000, 6, 32, 18},
{161000, 4, 31, 26},
{162000, 4, 18, 15},
{162162, 4, 131, 109},
{162500, 4, 53, 44},
{163000, 4, 29, 24},
{164000, 4, 17, 14},
{165000, 4, 22, 18},
{166000, 4, 32, 26},
{167000, 4, 26, 21},
{168000, 4, 46, 37},
{169000, 4, 104, 83},
{169128, 4, 64, 51},
{169500, 4, 39, 31},
{170000, 4, 34, 27},
{171000, 4, 19, 15},
{172000, 4, 51, 40},
{172750, 4, 32, 25},
{172800, 4, 32, 25},
{173000, 4, 41, 32},
{174000, 4, 49, 38},
{174787, 4, 22, 17},
{175000, 4, 35, 27},
{176000, 4, 30, 23},
{177000, 4, 38, 29},
{178000, 4, 29, 22},
{178500, 4, 37, 28},
{179000, 4, 53, 40},
{179500, 4, 73, 55},
{180000, 4, 20, 15},
{181000, 4, 55, 41},
{182000, 4, 31, 23},
{183000, 4, 42, 31},
{184000, 4, 30, 22},
{184750, 4, 26, 19},
{185000, 4, 37, 27},
{186000, 4, 51, 37},
{187000, 4, 36, 26},
{188000, 4, 32, 23},
{189000, 4, 21, 15},
{190000, 4, 38, 27},
{190960, 4, 41, 29},
{191000, 4, 41, 29},
{192000, 4, 27, 19},
{192250, 4, 37, 26},
{193000, 4, 20, 14},
{193250, 4, 53, 37},
{194000, 4, 23, 16},
{194208, 4, 23, 16},
{195000, 4, 26, 18},
{196000, 4, 45, 31},
{197000, 4, 35, 24},
{197750, 4, 41, 28},
{198000, 4, 22, 15},
{198500, 4, 25, 17},
{199000, 4, 28, 19},
{200000, 4, 37, 25},
{201000, 4, 61, 41},
{202000, 4, 112, 75},
{202500, 4, 21, 14},
{203000, 4, 146, 97},
{204000, 4, 62, 41},
{204750, 4, 44, 29},
{205000, 4, 38, 25},
{206000, 4, 29, 19},
{207000, 4, 23, 15},
{207500, 4, 40, 26},
{208000, 4, 37, 24},
{208900, 4, 48, 31},
{209000, 4, 48, 31},
{209250, 4, 31, 20},
{210000, 4, 28, 18},
{211000, 4, 25, 16},
{212000, 4, 22, 14},
{213000, 4, 30, 19},
{213750, 4, 38, 24},
{214000, 4, 46, 29},
{214750, 4, 35, 22},
{215000, 4, 43, 27},
{216000, 4, 24, 15},
{217000, 4, 37, 23},
{218000, 4, 42, 26},
{218250, 4, 42, 26},
{218750, 4, 34, 21},
{219000, 4, 47, 29},
{220000, 4, 44, 27},
{220640, 4, 49, 30},
{220750, 4, 36, 22},
{221000, 4, 36, 22},
{222000, 4, 23, 14},
{222525, 4, 28, 17},
{222750, 4, 33, 20},
{227000, 4, 37, 22},
{230250, 4, 29, 17},
{233500, 4, 38, 22},
{235000, 4, 40, 23},
{238000, 4, 30, 17},
{241500, 2, 17, 19},
{245250, 2, 20, 22},
{247750, 2, 22, 24},
{253250, 2, 15, 16},
{256250, 2, 18, 19},
{262500, 2, 31, 32},
{267250, 2, 66, 67},
{268500, 2, 94, 95},
{270000, 2, 14, 14},
{272500, 2, 77, 76},
{273750, 2, 57, 56},
{280750, 2, 24, 23},
{281250, 2, 23, 22},
{286000, 2, 17, 16},
{291750, 2, 26, 24},
{296703, 2, 56, 51},
{297000, 2, 22, 20},
{298000, 2, 21, 19},
};
 
static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_ddi_mode_set(struct intel_encoder *encoder)
{
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
int port = intel_ddi_get_encoder_port(intel_encoder);
int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int port = intel_ddi_get_encoder_port(encoder);
int pipe = crtc->pipe;
int type = encoder->type;
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
 
intel_crtc->eld_vld = false;
crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
enc_to_dig_port(&encoder->base);
 
intel_dp->DP = intel_dig_port->port_reversal |
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DDI_PORT_WIDTH_X1;
break;
case 2:
intel_dp->DP |= DDI_PORT_WIDTH_X2;
break;
case 4:
intel_dp->DP |= DDI_PORT_WIDTH_X4;
break;
default:
intel_dp->DP |= DDI_PORT_WIDTH_X4;
WARN(1, "Unexpected DP lane count %d\n",
intel_dp->lane_count);
break;
}
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
 
/* write eld */
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
 
intel_dp_init_link_config(intel_dp);
 
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs a new logic
723,14 → 308,14
* patch bombing.
*/
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
 
/* write eld */
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
 
intel_hdmi->set_infoframes(encoder, adjusted_mode);
intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
}
 
748,8 → 333,8
}
 
if (num_encoders != 1)
WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
intel_crtc->pipe);
WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
pipe_name(intel_crtc->pipe));
 
BUG_ON(ret == NULL);
return ret;
802,31 → 387,228
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
}
 
static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
#define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000)
 
#define P_MIN 2
#define P_MAX 64
#define P_INC 2
 
/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800
 
#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a))
 
struct wrpll_rnp {
unsigned p, n2, r2;
};
 
static unsigned wrpll_get_budget_for_freq(int clock)
{
u32 i;
unsigned budget;
 
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
if (clock <= wrpll_tmds_clock_table[i].clock)
switch (clock) {
case 25175000:
case 25200000:
case 27000000:
case 27027000:
case 37762500:
case 37800000:
case 40500000:
case 40541000:
case 54000000:
case 54054000:
case 59341000:
case 59400000:
case 72000000:
case 74176000:
case 74250000:
case 81000000:
case 81081000:
case 89012000:
case 89100000:
case 108000000:
case 108108000:
case 111264000:
case 111375000:
case 148352000:
case 148500000:
case 162000000:
case 162162000:
case 222525000:
case 222750000:
case 296703000:
case 297000000:
budget = 0;
break;
case 233500000:
case 245250000:
case 247750000:
case 253250000:
case 298000000:
budget = 1500;
break;
case 169128000:
case 169500000:
case 179500000:
case 202000000:
budget = 2000;
break;
case 256250000:
case 262500000:
case 270000000:
case 272500000:
case 273750000:
case 280750000:
case 281250000:
case 286000000:
case 291750000:
budget = 4000;
break;
case 267250000:
case 268500000:
budget = 5000;
break;
default:
budget = 1000;
break;
}
 
if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
i--;
return budget;
}
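The value returned here is a frequency tolerance in parts per million, which wrpll_update_rnp() below uses to decide whether a divider candidate is "close enough". As a rough illustration only (this helper is not part of the driver), a 4000 ppm budget on a 270 MHz link corresponds to about 1.08 MHz of allowed error:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: turn a ppm budget into an absolute tolerance in Hz. */
static uint64_t budget_to_hz(uint64_t clock_hz, unsigned budget_ppm)
{
	return clock_hz * budget_ppm / 1000000;
}

int main(void)
{
	/* 270 MHz with the 4000 ppm budget listed in the switch above. */
	printf("%llu Hz\n", (unsigned long long)budget_to_hz(270000000ULL, 4000));
	return 0;
}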
 
*p = wrpll_tmds_clock_table[i].p;
*n2 = wrpll_tmds_clock_table[i].n2;
*r2 = wrpll_tmds_clock_table[i].r2;
static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
unsigned r2, unsigned n2, unsigned p,
struct wrpll_rnp *best)
{
uint64_t a, b, c, d, diff, diff_best;
 
if (wrpll_tmds_clock_table[i].clock != clock)
DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
wrpll_tmds_clock_table[i].clock, clock);
/* No best (r,n,p) yet */
if (best->p == 0) {
best->p = p;
best->n2 = n2;
best->r2 = r2;
return;
}
 
DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
clock, *p, *n2, *r2);
/*
* Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
* freq2k.
*
* delta = 1e6 *
* abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
* freq2k;
*
* and we would like delta <= budget.
*
* If the discrepancy is above the PPM-based budget, always prefer to
* improve upon the previous solution. However, if you're within the
* budget, try to maximize Ref * VCO, that is N / (P * R^2).
*/
a = freq2k * budget * p * r2;
b = freq2k * budget * best->p * best->r2;
diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2));
diff_best = ABS_DIFF((freq2k * best->p * best->r2),
(LC_FREQ_2K * best->n2));
c = 1000000 * diff;
d = 1000000 * diff_best;
 
if (a < c && b < d) {
/* If both are above the budget, pick the closer */
if (best->p * best->r2 * diff < p * r2 * diff_best) {
best->p = p;
best->n2 = n2;
best->r2 = r2;
}
} else if (a >= c && b < d) {
/* The candidate is within the budget but the current best is not: take it. */
best->p = p;
best->n2 = n2;
best->r2 = r2;
} else if (a >= c && b >= d) {
/* Both are below the limit, so pick the higher n2/(r2*r2) */
if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
best->p = p;
best->n2 = n2;
best->r2 = r2;
}
}
/* Otherwise a < c && b >= d, do nothing */
}
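The comparisons above avoid the division in the delta formula by cross-multiplying: a candidate is within budget exactly when 1e6 * |freq2k*p*r2 - LC_FREQ_2K*n2| <= budget * freq2k * p * r2. A minimal standalone sketch of that test follows, with made-up candidate dividers (p=4, n2=44, r2=40 happen to hit a 148.5 MHz target exactly):

#include <stdint.h>
#include <stdio.h>

#define LC_FREQ_2K 5400000ULL	/* LC_FREQ * 2000, as in the driver above */

/* Illustrative only: the scaled comparison wrpll_update_rnp() relies on.
 * Returns non-zero when (p, n2, r2) is within `budget` ppm of freq2k. */
static int within_budget(uint64_t freq2k, unsigned budget,
			 unsigned p, unsigned n2, unsigned r2)
{
	uint64_t out  = LC_FREQ_2K * n2;
	uint64_t ref  = freq2k * p * r2;
	uint64_t diff = out > ref ? out - ref : ref - out;

	return 1000000ULL * diff <= (uint64_t)budget * ref;
}

int main(void)
{
	/* 148.5 MHz target: freq2k = 148500000 / 100. The candidate dividers
	 * are hypothetical; they satisfy the REF/VCO constraints above. */
	printf("%d\n", within_budget(1485000, 0, 4, 44, 40));	/* prints 1 */
	return 0;
}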
 
bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
static void
intel_ddi_calculate_wrpll(int clock /* in Hz */,
unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
uint64_t freq2k;
unsigned p, n2, r2;
struct wrpll_rnp best = { 0, 0, 0 };
unsigned budget;
 
freq2k = clock / 100;
 
budget = wrpll_get_budget_for_freq(clock);
 
/* Special case handling for a 540 MHz pixel clock: bypass the WR PLL entirely
* and pass the LC PLL frequency through directly. */
if (freq2k == 5400000) {
*n2_out = 2;
*p_out = 1;
*r2_out = 2;
return;
}
 
/*
* Ref = LC_FREQ / R, where Ref is the actual reference input seen by
* the WR PLL.
*
* We want R so that REF_MIN <= Ref <= REF_MAX.
* Injecting R2 = 2 * R gives:
* REF_MAX * r2 > LC_FREQ * 2 and
* REF_MIN * r2 < LC_FREQ * 2
*
* Which means the desired boundaries for r2 are:
* LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
*
*/
for (r2 = LC_FREQ * 2 / REF_MAX + 1;
r2 <= LC_FREQ * 2 / REF_MIN;
r2++) {
 
/*
* VCO = N * Ref, that is: VCO = N * LC_FREQ / R
*
* Once again we want VCO_MIN <= VCO <= VCO_MAX.
* Injecting R2 = 2 * R and N2 = 2 * N, we get:
* VCO_MAX * r2 > n2 * LC_FREQ and
* VCO_MIN * r2 < n2 * LC_FREQ)
*
* Which means the desired boundaries for n2 are:
* VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
*/
for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
n2 <= VCO_MAX * r2 / LC_FREQ;
n2++) {
 
for (p = P_MIN; p <= P_MAX; p += P_INC)
wrpll_update_rnp(freq2k, budget,
r2, n2, p, &best);
}
}
 
*n2_out = best.n2;
*p_out = best.p;
*r2_out = best.r2;
 
DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
clock, *p_out, *n2_out, *r2_out);
}
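With the constants defined above (LC_FREQ = 2700 and the REF/VCO limits), the search space the two nested loops walk is small and fixed: r2 runs from 14 to 112, and for each r2 the valid n2 window follows from the VCO bounds. A throwaway sketch that just prints those ranges, assuming only the constants shown earlier:

#include <stdio.h>

#define LC_FREQ 2700
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

int main(void)
{
	unsigned r2_min = LC_FREQ * 2 / REF_MAX + 1;	/* 5400/400 + 1 = 14  */
	unsigned r2_max = LC_FREQ * 2 / REF_MIN;	/* 5400/48      = 112 */
	unsigned r2;

	printf("r2: %u..%u\n", r2_min, r2_max);
	for (r2 = r2_min; r2 <= r2_max; r2++)
		printf("r2=%3u  n2: %u..%u\n", r2,
		       VCO_MIN * r2 / LC_FREQ + 1, VCO_MAX * r2 / LC_FREQ);
	return 0;
}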
 
bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
835,6 → 617,7
int type = intel_encoder->type;
enum pipe pipe = intel_crtc->pipe;
uint32_t reg, val;
int clock = intel_crtc->config.port_clock;
 
/* TODO: reuse PLLs when possible (compare values) */
 
863,7 → 646,7
return true;
 
} else if (type == INTEL_OUTPUT_HDMI) {
int p, n2, r2;
unsigned p, n2, r2;
 
if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
885,7 → 668,7
WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
"WRPLL already enabled\n");
 
intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
995,7 → 778,7
/* Can only use the always-on power well for eDP when
* not using the panel fitter, and when not using motion
* blur mitigation (which we don't support). */
if (dev_priv->pch_pf_size)
if (intel_crtc->config.pch_pfit.enabled)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
1022,7 → 805,7
 
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (intel_crtc->fdi_lanes - 1) << 1;
temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
 
} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
type == INTEL_OUTPUT_EDP) {
1030,25 → 813,10
 
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
switch (intel_dp->lane_count) {
case 1:
temp |= TRANS_DDI_PORT_WIDTH_X1;
break;
case 2:
temp |= TRANS_DDI_PORT_WIDTH_X2;
break;
case 4:
temp |= TRANS_DDI_PORT_WIDTH_X4;
break;
default:
temp |= TRANS_DDI_PORT_WIDTH_X4;
WARN(1, "Unsupported lane count %d\n",
intel_dp->lane_count);
}
 
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %d\n",
intel_encoder->type, pipe);
WARN(1, "Invalid encoder type %d for pipe %c\n",
intel_encoder->type, pipe_name(pipe));
}
 
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
1148,7 → 916,7
}
}
 
DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
 
return false;
}
1324,7 → 1092,8
* enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port),
intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
1332,9 → 1101,10
intel_dp_stop_link_train(intel_dp);
 
ironlake_edp_backlight_on(intel_dp);
intel_edp_psr_enable(intel_dp);
}
 
if (intel_crtc->eld_vld) {
if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
1352,13 → 1122,17
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
(pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
intel_edp_psr_disable(intel_dp);
ironlake_edp_backlight_off(intel_dp);
}
}
1365,15 → 1139,18
 
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450;
else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
LCPLL_CLK_FREQ_450)
return 450;
uint32_t lcpll = I915_READ(LCPLL_CTL);
 
if (lcpll & LCPLL_CD_SOURCE_FCLK)
return 800000;
else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
return 450000;
else if (IS_ULT(dev_priv->dev))
return 338;
return 337500;
else
return 540;
return 540000;
}
 
void intel_ddi_pll_init(struct drm_device *dev)
1386,7 → 1163,7
* Don't even try to turn it on.
*/
 
DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
intel_ddi_get_cdclk_freq(dev_priv));
 
if (val & LCPLL_CD_SOURCE_FCLK)
1472,6 → 1249,27
intel_dp_check_link_status(intel_dp);
}
 
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
u32 temp, flags = 0;
 
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (temp & TRANS_DDI_PVSYNC)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
}
 
static void intel_ddi_destroy(struct drm_encoder *encoder)
{
/* HDMI has nothing special to destroy, so we can go with this. */
1482,9 → 1280,13
struct intel_crtc_config *pipe_config)
{
int type = encoder->type;
int port = intel_ddi_get_encoder_port(encoder);
 
WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
 
if (port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
 
if (type == INTEL_OUTPUT_HDMI)
return intel_hdmi_compute_config(encoder, pipe_config);
else
1495,10 → 1297,6
.destroy = intel_ddi_destroy,
};
 
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
.mode_set = intel_ddi_mode_set,
};
 
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1518,35 → 1316,25
return;
}
 
if (port != PORT_A) {
hdmi_connector = kzalloc(sizeof(struct intel_connector),
GFP_KERNEL);
if (!hdmi_connector) {
kfree(dp_connector);
kfree(intel_dig_port);
return;
}
}
 
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
 
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
 
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->mode_set = intel_ddi_mode_set;
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
 
intel_dig_port->port = port;
intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
DDI_BUF_PORT_REVERSAL;
if (hdmi_connector)
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
1554,7 → 1342,21
intel_encoder->cloneable = false;
intel_encoder->hot_plug = intel_ddi_hot_plug;
 
if (hdmi_connector)
if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
kfree(dp_connector);
return;
}
 
if (intel_encoder->type != INTEL_OUTPUT_EDP) {
hdmi_connector = kzalloc(sizeof(struct intel_connector),
GFP_KERNEL);
if (!hdmi_connector) {
return;
}
 
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
intel_dp_init_connector(intel_dig_port, dp_connector);
}
}
/drivers/video/drm/i915/intel_display.c
41,29 → 41,22
#include <drm/drm_crtc_helper.h>
//#include <linux/dma_remapping.h>
 
#define MAX_ERRNO 4095
phys_addr_t get_bus_addr(void);
 
 
#define MAX_ERRNO 4095
 
 
 
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
typedef struct {
/* given values */
int n;
int m1, m2;
int p1, p2;
/* derived values */
int dot;
int vco;
int m;
int p;
} intel_clock_t;
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
 
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
 
 
typedef struct {
int min, max;
} intel_range_t;
73,29 → 66,10
int p2_slow, p2_fast;
} intel_p2_t;
 
#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
/**
* find_pll() - Find the best values for the PLL
* @limit: limits for the PLL
* @crtc: current CRTC
* @target: target frequency in kHz
* @refclk: reference clock frequency in kHz
* @match_clock: if provided, @best_clock P divider must
* match the P divider from @match_clock
* used for LVDS downclocking
* @best_clock: best PLL values found
*
* Returns true on success, false on failure.
*/
bool (*find_pll)(const intel_limit_t *limit,
struct drm_crtc *crtc,
int target, int refclk,
intel_clock_t *match_clock,
intel_clock_t *best_clock);
};
 
/* FDI */
111,29 → 85,6
return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
 
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
 
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
 
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
 
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
144,7 → 95,7
return 27;
}
 
static const intel_limit_t intel_limits_i8xx_dvo = {
static const intel_limit_t intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
155,9 → 106,21
.p1 = { .min = 2, .max = 33 },
.p2 = { .dot_limit = 165000,
.p2_slow = 4, .p2_fast = 2 },
.find_pll = intel_find_best_PLL,
};
 
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
.p = { .min = 4, .max = 128 },
.p1 = { .min = 2, .max = 33 },
.p2 = { .dot_limit = 165000,
.p2_slow = 4, .p2_fast = 4 },
};
 
static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
169,7 → 132,6
.p1 = { .min = 1, .max = 6 },
.p2 = { .dot_limit = 165000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
};
 
static const intel_limit_t intel_limits_i9xx_sdvo = {
183,7 → 145,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
 
static const intel_limit_t intel_limits_i9xx_lvds = {
197,7 → 158,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
};
 
 
214,7 → 174,6
.p2_slow = 10,
.p2_fast = 10
},
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_g4x_hdmi = {
228,7 → 187,6
.p1 = { .min = 1, .max = 8},
.p2 = { .dot_limit = 165000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
243,7 → 201,6
.p2 = { .dot_limit = 0,
.p2_slow = 14, .p2_fast = 14
},
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
258,23 → 215,8
.p2 = { .dot_limit = 0,
.p2_slow = 7, .p2_fast = 7
},
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_g4x_display_port = {
.dot = { .min = 161670, .max = 227000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 2 },
.m = { .min = 97, .max = 108 },
.m1 = { .min = 0x10, .max = 0x12 },
.m2 = { .min = 0x05, .max = 0x06 },
.p = { .min = 10, .max = 20 },
.p1 = { .min = 1, .max = 2},
.p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
.find_pll = intel_find_pll_g4x_dp,
};
 
static const intel_limit_t intel_limits_pineview_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
288,7 → 230,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
 
static const intel_limit_t intel_limits_pineview_lvds = {
302,7 → 243,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_find_best_PLL,
};
 
/* Ironlake / Sandybridge
321,7 → 261,6
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_ironlake_single_lvds = {
335,7 → 274,6
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
349,7 → 287,6
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
 
/* LVDS 100mhz refclk limits. */
364,7 → 301,6
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
378,23 → 314,8
.p1 = { .min = 2, .max = 6 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
 
static const intel_limit_t intel_limits_ironlake_display_port = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000},
.n = { .min = 1, .max = 2 },
.m = { .min = 81, .max = 90 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 10, .max = 20 },
.p1 = { .min = 1, .max = 2},
.p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
.find_pll = intel_find_pll_ironlake_dp,
};
 
static const intel_limit_t intel_limits_vlv_dac = {
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 4000000, .max = 6000000 },
403,15 → 324,14
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
.p1 = { .min = 1, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.find_pll = intel_vlv_find_best_pll,
};
 
static const intel_limit_t intel_limits_vlv_hdmi = {
.dot = { .min = 20000, .max = 165000 },
.vco = { .min = 4000000, .max = 5994000},
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
420,7 → 340,6
.p1 = { .min = 2, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.find_pll = intel_vlv_find_best_pll,
};
 
static const intel_limit_t intel_limits_vlv_dp = {
431,61 → 350,11
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
.p1 = { .min = 1, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.find_pll = intel_vlv_find_best_pll,
};
 
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
return 0;
}
 
I915_WRITE(DPIO_REG, reg);
I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO read wait timed out\n");
return 0;
}
 
return I915_READ(DPIO_DATA);
}
 
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
return;
}
 
I915_WRITE(DPIO_DATA, val);
I915_WRITE(DPIO_REG, reg);
I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
DRM_ERROR("DPIO write wait timed out\n");
}
 
static void vlv_init_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Reset the DPIO config */
I915_WRITE(DPIO_CTL, 0);
POSTING_READ(DPIO_CTL);
I915_WRITE(DPIO_CTL, 1);
POSTING_READ(DPIO_CTL);
}
 
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
504,10 → 373,7
else
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
limit = &intel_limits_ironlake_display_port;
else
} else
limit = &intel_limits_ironlake_dac;
 
return limit;
528,8 → 394,6
limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
 
565,8 → 429,10
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dvo;
limit = &intel_limits_i8xx_dac;
}
return limit;
}
580,13 → 446,14
clock->dot = clock->vco / clock->p;
}
 
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
if (IS_PINEVIEW(dev)) {
pineview_clock(refclk, clock);
return;
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
 
static void i9xx_clock(int refclk, intel_clock_t *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / (clock->n + 2);
clock->dot = clock->vco / clock->p;
643,10 → 510,9
}
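For reference, the divider arithmetic in i9xx_clock() above is plain integer math: m = 5*(m1+2) + (m2+2), p = p1*p2, vco = refclk*m/(n+2), dot = vco/p. A standalone sketch with hypothetical divider values (not taken from any of the limit tables) shows the order of operations:

#include <stdio.h>

struct clock_example { int n, m1, m2, p1, p2, m, p, vco, dot; };

int main(void)
{
	/* Hypothetical dividers, chosen only to illustrate the arithmetic. */
	struct clock_example c = { .n = 3, .m1 = 20, .m2 = 10, .p1 = 2, .p2 = 5 };
	int refclk = 96000;	/* kHz */

	c.m   = 5 * (c.m1 + 2) + (c.m2 + 2);	/* 5*22 + 12 = 122           */
	c.p   = c.p1 * c.p2;			/* 10                        */
	c.vco = refclk * c.m / (c.n + 2);	/* 96000*122/5 = 2342400 kHz */
	c.dot = c.vco / c.p;			/* 234240 kHz                */

	printf("vco=%d kHz, dot=%d kHz\n", c.vco, c.dot);
	return 0;
}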
 
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
 
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
675,8 → 541,7
clock.m1++) {
for (clock.m2 = limit->m2.min;
clock.m2 <= limit->m2.max; clock.m2++) {
/* m1 is always 0 in Pineview */
if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
if (clock.m2 >= clock.m1)
break;
for (clock.n = limit->n.min;
clock.n <= limit->n.max; clock.n++) {
684,7 → 549,7
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
 
intel_clock(dev, refclk, &clock);
i9xx_clock(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
706,12 → 571,71
}
 
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
int err = target;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
 
memset(best_clock, 0, sizeof(*best_clock));
 
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
clock.m2 <= limit->m2.max; clock.m2++) {
for (clock.n = limit->n.min;
clock.n <= limit->n.max; clock.n++) {
for (clock.p1 = limit->p1.min;
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
 
pineview_clock(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
if (match_clock &&
clock.p != match_clock->p)
continue;
 
this_err = abs(clock.dot - target);
if (this_err < err) {
*best_clock = clock;
err = this_err;
}
}
}
}
}
 
return (err != target);
}
 
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
int max_n;
bool found;
/* approximately equals target * 0.00585 */
719,12 → 643,6
found = false;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
int lvds_reg;
 
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
749,13 → 667,10
clock.p1 >= limit->p1.min; clock.p1--) {
int this_err;
 
intel_clock(dev, refclk, &clock);
i9xx_clock(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
if (match_clock &&
clock.p != match_clock->p)
continue;
 
this_err = abs(clock.dot - target);
if (this_err < err_most) {
772,66 → 687,13
}
 
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
 
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
clock.p2 = 10;
clock.m1 = 12;
clock.m2 = 9;
} else {
clock.n = 2;
clock.p1 = 1;
clock.p2 = 10;
clock.m1 = 14;
clock.m2 = 8;
}
intel_clock(dev, refclk, &clock);
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
 
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
intel_clock_t clock;
if (target < 200000) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 2;
clock.m1 = 23;
clock.m2 = 8;
} else {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 1;
clock.m1 = 14;
clock.m2 = 2;
}
clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
clock.p = (clock.p1 * clock.p2);
clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
clock.vco = 0;
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
u32 m, n, fastclk;
u32 updrate, minupdate, fracbits, p;
u32 updrate, minupdate, p;
unsigned long bestppm, ppm, absppm;
int dotclk, flag;
 
842,7 → 704,6
fastclk = dotclk / (2*100);
updrate = 0;
minupdate = 19200;
fracbits = 1;
n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
bestm1 = bestm2 = bestp1 = bestp2 = 0;
 
1056,7 → 917,7
}
 
/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
1070,17 → 931,25
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
 
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (crtc->config.shared_dpll < 0)
return NULL;
 
return &dev_priv->shared_dplls[crtc->config.shared_dpll];
}
 
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
struct intel_pch_pll *pll,
struct intel_crtc *crtc,
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state)
{
u32 val;
bool cur_state;
struct intel_dpll_hw_state hw_state;
 
if (HAS_PCH_LPT(dev_priv->dev)) {
DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1088,36 → 957,14
}
 
if (WARN (!pll,
"asserting PCH PLL %s with no PLL\n", state_string(state)))
"asserting DPLL %s with no DPLL\n", state_string(state)))
return;
 
val = I915_READ(pll->pll_reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
WARN(cur_state != state,
"PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
pll->pll_reg, state_string(state), state_string(cur_state), val);
 
/* Make sure the selected PLL is correctly attached to the transcoder */
if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
 
pch_dpll = I915_READ(PCH_DPLL_SEL);
cur_state = pll->pll_reg == _PCH_DPLL_B;
if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
"PLL[%d] not attached to this transcoder %d: %08x\n",
cur_state, crtc->pipe, pch_dpll)) {
cur_state = !!(val >> (4*crtc->pipe + 3));
WARN(cur_state != state,
"PLL[%d] not %s on this transcoder %d: %08x\n",
pll->pll_reg == _PCH_DPLL_B,
state_string(state),
crtc->pipe,
val);
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
}
}
}
#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
1181,15 → 1028,19
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
 
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
 
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
cur_state = !!(val & FDI_RX_PLL_ENABLE);
WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
 
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1234,8 → 1085,8
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
 
if (!intel_using_power_well(dev_priv->dev) &&
cpu_transcoder != TRANSCODER_EDP) {
if (!intel_display_power_enabled(dev_priv->dev,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
reg = PIPECONF(cpu_transcoder);
1269,12 → 1120,13
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int reg, i;
u32 val;
int cur_pipe;
 
/* Planes are fixed to pipes on ILK+ */
if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) {
/* Primary planes are fixed to pipes on gen4+ */
if (INTEL_INFO(dev)->gen >= 4) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
WARN((val & DISPLAY_PLANE_ENABLE),
1284,7 → 1136,7
}
 
/* Need to check both planes against the pipe */
for (i = 0; i < 2; i++) {
for_each_pipe(i) {
reg = DSPCNTR(i);
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1298,21 → 1150,32
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int reg, i;
u32 val;
 
if (!IS_VALLEYVIEW(dev_priv->dev))
return;
 
/* Need to check both planes against the pipe */
if (IS_VALLEYVIEW(dev)) {
for (i = 0; i < dev_priv->num_plane; i++) {
reg = SPCNTR(pipe, i);
val = I915_READ(reg);
WARN((val & SP_ENABLE),
"sprite %d assertion failure, should be off on pipe %c but is still active\n",
pipe * 2 + i, pipe_name(pipe));
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, i), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
val = I915_READ(reg);
WARN((val & SPRITE_ENABLE),
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
} else if (INTEL_INFO(dev)->gen >= 5) {
reg = DVSCNTR(pipe);
val = I915_READ(reg);
WARN((val & DVS_ENABLE),
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
}
}
 
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
1330,7 → 1193,7
WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
 
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
1337,7 → 1200,7
u32 val;
bool enabled;
 
reg = TRANSCONF(pipe);
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
enabled = !!(val & TRANS_ENABLE);
WARN(enabled,
1463,49 → 1326,92
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
 
/**
* intel_enable_pll - enable a PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
* make sure the PLL reg is writable first though, since the panel write
* protect mechanism may be enabled.
*
* Note! This is for pre-ILK only.
*
* Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
*/
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
static void vlv_enable_pll(struct intel_crtc *crtc)
{
int reg;
u32 val;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
u32 dpll = crtc->config.dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
/* No really, not for ILK+ */
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
assert_panel_unlocked(dev_priv, pipe);
assert_panel_unlocked(dev_priv, crtc->pipe);
 
reg = DPLL(pipe);
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150);
 
if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
 
I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(crtc->pipe));
 
/* We do this three times for luck */
I915_WRITE(reg, val);
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
 
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
u32 dpll = crtc->config.dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
/* No really, not for ILK+ */
BUG_ON(dev_priv->info->gen >= 5);
 
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev) && !IS_I830(dev))
assert_panel_unlocked(dev_priv, crtc->pipe);
 
I915_WRITE(reg, dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(reg);
udelay(150);
 
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
crtc->config.dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(reg, dpll);
}
 
/* We do this three times for luck */
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
 
/**
* intel_disable_pll - disable a PLL
* i9xx_disable_pll - disable a PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to disable
*
1513,11 → 1419,8
*
* Note! This is for pre-ILK only.
*/
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
int reg;
u32 val;
 
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
1525,76 → 1428,26
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
 
reg = DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
I915_WRITE(DPLL(pipe), 0);
POSTING_READ(DPLL(pipe));
}
 
/* SBI access */
static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
{
u32 tmp;
u32 port_mask;
 
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return;
}
 
I915_WRITE(SBI_ADDR, (reg << 16));
I915_WRITE(SBI_DATA, value);
 
if (destination == SBI_ICLK)
tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
if (!port)
port_mask = DPLL_PORTB_READY_MASK;
else
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
port_mask = DPLL_PORTC_READY_MASK;
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
return;
if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
'B' + port, I915_READ(DPLL(0)));
}
}
 
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return 0;
}
 
I915_WRITE(SBI_ADDR, (reg << 16));
 
if (destination == SBI_ICLK)
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
else
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
return 0;
}
 
return I915_READ(SBI_DATA);
}
 
/**
* ironlake_enable_pch_pll - enable PCH PLL
* ironlake_enable_shared_dpll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
1601,87 → 1454,64
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
int reg;
u32 val;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
/* PCH PLLs only available on ILK, SNB and IVB */
BUG_ON(dev_priv->info->gen < 5);
pll = intel_crtc->pch_pll;
if (pll == NULL)
if (WARN_ON(pll == NULL))
return;
 
if (WARN_ON(pll->refcount == 0))
return;
 
DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
pll->pll_reg, pll->active, pll->on,
intel_crtc->base.base.id);
DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
pll->name, pll->active, pll->on,
crtc->base.base.id);
 
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
 
if (pll->active++ && pll->on) {
assert_pch_pll_enabled(dev_priv, pll, NULL);
if (pll->active++) {
WARN_ON(!pll->on);
assert_shared_dpll_enabled(dev_priv, pll);
return;
}
WARN_ON(pll->on);
 
DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
 
reg = pll->pll_reg;
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
 
DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->enable(dev_priv, pll);
pll->on = true;
}
 
static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll = intel_crtc->pch_pll;
int reg;
u32 val;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
if (pll == NULL)
if (WARN_ON(pll == NULL))
return;
 
if (WARN_ON(pll->refcount == 0))
return;
 
DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
pll->pll_reg, pll->active, pll->on,
intel_crtc->base.base.id);
DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
pll->name, pll->active, pll->on,
crtc->base.base.id);
 
if (WARN_ON(pll->active == 0)) {
assert_pch_pll_disabled(dev_priv, pll, NULL);
assert_shared_dpll_disabled(dev_priv, pll);
return;
}
 
if (--pll->active) {
assert_pch_pll_enabled(dev_priv, pll, NULL);
assert_shared_dpll_enabled(dev_priv, pll);
WARN_ON(!pll->on);
if (--pll->active)
return;
}
 
DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
 
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
 
reg = pll->pll_reg;
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
 
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->disable(dev_priv, pll);
pll->on = false;
}
 
1690,6 → 1520,7
{
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t reg, val, pipeconf_val;
 
/* PCH only available on ILK+ */
1696,9 → 1527,8
BUG_ON(dev_priv->info->gen < 5);
 
/* Make sure PCH DPLL is enabled */
assert_pch_pll_enabled(dev_priv,
to_intel_crtc(crtc)->pch_pll,
to_intel_crtc(crtc));
assert_shared_dpll_enabled(dev_priv,
intel_crtc_to_shared_dpll(intel_crtc));
 
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
1713,7 → 1543,7
I915_WRITE(reg, val);
}
 
reg = TRANSCONF(pipe);
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
 
1738,7 → 1568,7
 
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
 
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1767,8 → 1597,8
else
val |= TRANS_PROGRESSIVE;
 
I915_WRITE(TRANSCONF(TRANSCODER_A), val);
if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
I915_WRITE(LPT_TRANSCONF, val);
if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("Failed to enable PCH transcoder\n");
}
 
1785,13 → 1615,13
/* Ports must be off as well */
assert_pch_ports_disabled(dev_priv, pipe);
 
reg = TRANSCONF(pipe);
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %d\n", pipe);
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
 
if (!HAS_PCH_IBX(dev)) {
/* Workaround: Clear the timing override chicken bit again. */
1806,11 → 1636,11
{
u32 val;
 
val = I915_READ(_TRANSACONF);
val = I915_READ(LPT_TRANSCONF);
val &= ~TRANS_ENABLE;
I915_WRITE(_TRANSACONF, val);
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("Failed to disable PCH transcoder\n");
 
/* Workaround: clear timing override bit. */
1842,6 → 1672,9
int reg;
u32 val;
 
assert_planes_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
 
if (HAS_PCH_LPT(dev_priv->dev))
pch_transcoder = TRANSCODER_A;
else
2046,7 → 1879,7
return 0;
 
err_unpin:
i915_gem_object_unpin(obj);
i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
dev_priv->mm.interruptible = true;
return ret;
2103,7 → 1936,7
case 1:
break;
default:
DRM_ERROR("Can't update plane %d in SAREA\n", plane);
DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
return -EINVAL;
}
 
2152,6 → 1985,9
dspcntr &= ~DISPPLANE_TILED;
}
 
if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
I915_WRITE(reg, dspcntr);
 
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2166,16 → 2002,17
intel_crtc->dspaddr_offset = linear_offset;
}
 
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
 
return 0;
2200,7 → 2037,7
case 2:
break;
default:
DRM_ERROR("Can't update plane %d in SAREA\n", plane);
DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
return -EINVAL;
}
 
2243,7 → 2080,9
else
dspcntr &= ~DISPPLANE_TILED;
 
/* must disable */
if (IS_HASWELL(dev))
dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
else
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
I915_WRITE(reg, dspcntr);
2255,11 → 2094,12
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
 
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
2287,6 → 2127,44
}
 
#if 0
void intel_display_handle_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
/*
* Flips in the rings have been nuked by the reset,
* so complete all pending flips so that user space
* will get its events and not get stuck.
*
* Also update the base address of all primary
* planes to the last fb to make sure we're
* showing the correct fb after a reset.
*
* Need to make two loops over the crtcs so that we
* don't try to grab a crtc mutex before the
* pending_flip_queue really got woken up.
*/
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
 
intel_prepare_page_flip(dev, plane);
intel_finish_page_flip_plane(dev, plane);
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
mutex_lock(&crtc->mutex);
if (intel_crtc->active)
dev_priv->display.update_plane(crtc, crtc->fb,
crtc->x, crtc->y);
mutex_unlock(&crtc->mutex);
}
}
 
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
2309,6 → 2187,33
 
return ret;
}
 
static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (!dev->primary->master)
return;
 
master_priv = dev->primary->master->driver_priv;
if (!master_priv->sarea_priv)
return;
 
switch (intel_crtc->pipe) {
case 0:
master_priv->sarea_priv->pipeA_x = x;
master_priv->sarea_priv->pipeA_y = y;
break;
case 1:
master_priv->sarea_priv->pipeB_x = x;
master_priv->sarea_priv->pipeB_y = y;
break;
default:
break;
}
}
#endif
 
static int
2328,8 → 2233,8
}
 
if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
intel_crtc->plane,
DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
plane_name(intel_crtc->plane),
INTEL_INFO(dev)->num_pipes);
return -EINVAL;
}
2359,11 → 2264,13
crtc->y = y;
 
if (old_fb) {
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
 
intel_update_fbc(dev);
intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
 
return 0;
2410,6 → 2317,11
FDI_FE_ERRC_ENABLE);
}
 
static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
{
return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
}
 
static void ivb_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2419,10 → 2331,13
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
uint32_t temp;
 
/* When everything is off disable fdi C so that we could enable fdi B
* with all lanes. XXX: This misses the case where a pipe is not using
* any pch resources and so doesn't need any fdi lanes. */
if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
/*
* When everything is off, disable fdi C so that we can enable fdi B
* with all lanes. Note that we don't care about enabled pipes without
* an enabled pch encoder.
*/
if (!pipe_has_enabled_pch(pipe_B_crtc) &&
!pipe_has_enabled_pch(pipe_C_crtc)) {
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
 
2460,8 → 2375,8
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
2558,8 → 2473,8
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2674,7 → 2589,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i;
u32 reg, temp, i, j;
 
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
2690,15 → 2605,30
DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
I915_READ(FDI_RX_IIR(pipe)));
 
/* Try each vswing and preemphasis setting twice before moving on */
for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
/* disable first in case we need to retry */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp &= ~FDI_TX_ENABLE;
I915_WRITE(reg, temp);
 
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp &= ~FDI_RX_ENABLE;
I915_WRITE(reg, temp);
 
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
temp |= snb_b_fdi_train_param[j/2];
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
2707,25 → 2637,14
 
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_RX_ENABLE);
 
POSTING_READ(reg);
udelay(150);
udelay(1); /* should be 0.5us */
 
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
I915_WRITE(reg, temp);
 
POSTING_READ(reg);
udelay(500);
 
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2733,12 → 2652,16
if (temp & FDI_RX_BIT_LOCK ||
(I915_READ(reg) & FDI_RX_BIT_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
i);
break;
}
udelay(1); /* should be 0.5us */
}
if (i == 4)
DRM_ERROR("FDI train 1 fail!\n");
if (i == 4) {
DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
continue;
}
 
/* Train 2 */
reg = FDI_TX_CTL(pipe);
2745,8 → 2668,6
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp);
 
reg = FDI_RX_CTL(pipe);
2756,31 → 2677,27
I915_WRITE(reg, temp);
 
POSTING_READ(reg);
udelay(150);
udelay(2); /* should be 1.5us */
 
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
I915_WRITE(reg, temp);
 
POSTING_READ(reg);
udelay(500);
 
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
if (temp & FDI_RX_SYMBOL_LOCK) {
if (temp & FDI_RX_SYMBOL_LOCK ||
(I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
break;
DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
i);
goto train_done;
}
udelay(2); /* should be 1.5us */
}
if (i == 4)
DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
}
 
train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
 
2795,8 → 2712,8
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~((0x7 << 19) | (0x7 << 16));
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
 
2942,8 → 2859,6
}
#endif
 
 
 
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
3032,6 → 2947,30
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
enum pipe pch_transcoder)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
 
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
I915_READ(HTOTAL(cpu_transcoder)));
I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
I915_READ(HBLANK(cpu_transcoder)));
I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
I915_READ(HSYNC(cpu_transcoder)));
 
I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
I915_READ(VTOTAL(cpu_transcoder)));
I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
I915_READ(VBLANK(cpu_transcoder)));
I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
I915_READ(VSYNC(cpu_transcoder)));
I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
 
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
3048,7 → 2987,7
int pipe = intel_crtc->pipe;
u32 reg, temp;
 
assert_transcoder_disabled(dev_priv, pipe);
assert_pch_transcoder_disabled(dev_priv, pipe);
 
/* Write the TU size bits before fdi link training, so that error
* detection works. */
3058,35 → 2997,15
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
 
/* XXX: pch pll's can be enabled any time before we enable the PCH
* transcoder, and we actually should do this to not upset any PCH
* transcoder that already uses the clock when we share it.
*
* Note that enable_pch_pll tries to do the right thing, but get_pch_pll
* unconditionally resets the pll - we need that to have the right LVDS
* enable sequence. */
ironlake_enable_pch_pll(intel_crtc);
 
/* We need to program the right clock selection before writing the pixel
* multiplier into the DPLL. */
if (HAS_PCH_CPT(dev)) {
u32 sel;
 
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
default:
case 0:
temp |= TRANSA_DPLL_ENABLE;
sel = TRANSA_DPLLB_SEL;
break;
case 1:
temp |= TRANSB_DPLL_ENABLE;
sel = TRANSB_DPLLB_SEL;
break;
case 2:
temp |= TRANSC_DPLL_ENABLE;
sel = TRANSC_DPLLB_SEL;
break;
}
if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
temp |= sel;
else
temp &= ~sel;
3093,17 → 3012,19
I915_WRITE(PCH_DPLL_SEL, temp);
}
 
/* XXX: pch pll's can be enabled any time before we enable the PCH
* transcoder, and we actually should do this to not upset any PCH
* transcoder that already use the clock when we share it.
*
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
ironlake_enable_shared_dpll(intel_crtc);
 
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
 
I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
 
intel_fdi_normal_train(crtc);
 
/* For PCH DP, enable TRANS_DP_CTL */
3152,75 → 3073,71
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
assert_transcoder_disabled(dev_priv, TRANSCODER_A);
assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
 
lpt_program_iclkip(crtc);
 
/* Set transcoder timing. */
I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
 
I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
 
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
 
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
static void intel_put_shared_dpll(struct intel_crtc *crtc)
{
struct intel_pch_pll *pll = intel_crtc->pch_pll;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
if (pll == NULL)
return;
 
if (pll->refcount == 0) {
WARN(1, "bad PCH PLL refcount\n");
WARN(1, "bad %s refcount\n", pll->name);
return;
}
 
--pll->refcount;
intel_crtc->pch_pll = NULL;
if (--pll->refcount == 0) {
WARN_ON(pll->on);
WARN_ON(pll->active);
}
 
static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
 
static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
int i;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
enum intel_dpll_id i;
 
pll = intel_crtc->pch_pll;
if (pll) {
DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
intel_crtc->base.base.id, pll->pll_reg);
goto prepare;
DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
crtc->base.base.id, pll->name);
intel_put_shared_dpll(crtc);
}
 
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = intel_crtc->pipe;
pll = &dev_priv->pch_plls[i];
i = (enum intel_dpll_id) crtc->pipe;
pll = &dev_priv->shared_dplls[i];
 
DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
intel_crtc->base.base.id, pll->pll_reg);
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
 
goto found;
}
 
for (i = 0; i < dev_priv->num_pch_pll; i++) {
pll = &dev_priv->pch_plls[i];
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
 
/* Only want to check enabled timings first */
if (pll->refcount == 0)
continue;
 
if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
fp == I915_READ(pll->fp0_reg)) {
DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
intel_crtc->base.base.id,
pll->pll_reg, pll->refcount, pll->active);
if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
sizeof(pll->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
crtc->base.base.id,
pll->name, pll->refcount, pll->active);
 
goto found;
}
3227,11 → 3144,11
}
 
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_pch_pll; i++) {
pll = &dev_priv->pch_plls[i];
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
if (pll->refcount == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
intel_crtc->base.base.id, pll->pll_reg);
DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
crtc->base.base.id, pll->name);
goto found;
}
}
3239,24 → 3156,26
return NULL;
 
found:
intel_crtc->pch_pll = pll;
crtc->config.shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
 
if (pll->active == 0) {
memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
sizeof(pll->hw_state));
 
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
 
pll->mode_set(dev_priv, pll);
}
pll->refcount++;
DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
prepare: /* separate function? */
DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
 
/* Wait for the clocks to stabilize before rewriting the regs */
I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(pll->pll_reg);
udelay(150);
 
I915_WRITE(pll->fp0_reg, fp);
I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
pll->on = false;
return pll;
}
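/* Editor's note, illustrative only (names are not the driver's): the
 * allocator above implements a small refcounted pool -- reuse an entry whose
 * saved state matches the requested one, otherwise claim a free entry and
 * bump its refcount. A generic, self-contained sketch of that policy: */
#if 0
#include <string.h>

struct pool_entry {
	int refcount;
	struct { unsigned int cfg; } state;
};

static struct pool_entry *pool_get(struct pool_entry *pool, int n,
				   const struct pool_entry *wanted)
{
	int i;

	/* First try to share an already-programmed entry. */
	for (i = 0; i < n; i++)
		if (pool[i].refcount &&
		    memcmp(&pool[i].state, &wanted->state, sizeof(pool[i].state)) == 0)
			goto found;

	/* Otherwise look for an unused one. */
	for (i = 0; i < n; i++)
		if (pool[i].refcount == 0)
			goto found;

	return NULL;
found:
	pool[i].refcount++;
	return &pool[i];
}
#endif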
 
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dslreg = PIPEDSL(pipe);
3266,10 → 3185,53
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
}
}
 
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
if (crtc->config.pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
}
}
 
static void intel_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct intel_plane *intel_plane;
 
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
if (intel_plane->pipe == pipe)
intel_plane_restore(&intel_plane->base);
}
 
static void intel_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct intel_plane *intel_plane;
 
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
if (intel_plane->pipe == pipe)
intel_plane_disable(&intel_plane->base);
}
 
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3278,7 → 3240,6
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 temp;
 
WARN_ON(!crtc->enabled);
 
3286,15 → 3247,16
return;
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
intel_update_watermarks(dev);
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(PCH_LVDS);
if ((temp & LVDS_PORT_EN) == 0)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
 
if (intel_crtc->config.has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
3305,27 → 3267,8
assert_fdi_rx_disabled(dev_priv, pipe);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
ironlake_pfit_enable(intel_crtc);
 
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
 
/*
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
3335,6 → 3278,8
intel_enable_pipe(dev_priv, pipe,
intel_crtc->config.has_pch_encoder);
intel_enable_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
// intel_crtc_update_cursor(crtc, true);
 
if (intel_crtc->config.has_pch_encoder)
ironlake_pch_enable(crtc);
3343,13 → 3288,11
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
// intel_crtc_update_cursor(crtc, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
if (HAS_PCH_CPT(dev))
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
cpt_verify_modeset(dev, intel_crtc->pipe);
 
/*
* There seems to be a race in PCH platform hw (at least on some
3362,6 → 3305,42
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
 
static void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!crtc->config.ips_enabled)
return;
 
/* We can only enable IPS after we enable a plane and wait for a vblank.
* We guarantee that the plane is enabled by calling intel_enable_ips
* only after intel_enable_plane. And intel_enable_plane already waits
* for a vblank, so all we need to do here is to enable the IPS bit. */
assert_plane_enabled(dev_priv, crtc->plane);
I915_WRITE(IPS_CTL, IPS_ENABLE);
}
 
static void hsw_disable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!crtc->config.ips_enabled)
return;
 
assert_plane_enabled(dev_priv, crtc->plane);
I915_WRITE(IPS_CTL, 0);
 
/* We need to wait for a vblank before we can disable the plane. */
intel_wait_for_vblank(dev, crtc->pipe);
}
 
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3377,6 → 3356,11
return;
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 
intel_update_watermarks(dev);
 
if (intel_crtc->config.has_pch_encoder)
3388,18 → 3372,7
 
intel_ddi_enable_pipe_clock(intel_crtc);
 
/* Enable panel fitting for eDP */
if (dev_priv->pch_pf_size &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
ironlake_pfit_enable(intel_crtc);
 
/*
* On ILK+ LUT must be loaded before the pipe is running but with
3413,7 → 3386,11
intel_enable_pipe(dev_priv, pipe,
intel_crtc->config.has_pch_encoder);
intel_enable_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
// intel_crtc_update_cursor(crtc, true);
 
hsw_enable_ips(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
 
3421,8 → 3398,6
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
// intel_crtc_update_cursor(crtc, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
3437,6 → 3412,21
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
}
}
 
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3456,58 → 3446,51
 
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
 
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
// intel_crtc_update_cursor(crtc, false);
 
intel_disable_planes(crtc);
intel_disable_plane(dev_priv, plane, pipe);
 
if (dev_priv->cfb_plane == plane)
intel_disable_fbc(dev);
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
 
intel_disable_pipe(dev_priv, pipe);
 
/* Disable PF */
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
ironlake_pfit_disable(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (intel_crtc->config.has_pch_encoder) {
ironlake_fdi_disable(crtc);
 
ironlake_disable_pch_transcoder(dev_priv, pipe);
intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
temp &= ~(TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_PORT_SEL_MASK);
temp |= TRANS_DP_PORT_SEL_NONE;
I915_WRITE(reg, temp);
 
/* disable DPLL_SEL */
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
case 0:
temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
break;
case 1:
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
break;
case 2:
/* C shares PLL A or B */
temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
break;
default:
BUG(); /* wtf */
}
temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
I915_WRITE(PCH_DPLL_SEL, temp);
}
 
/* disable PCH DPLL */
intel_disable_pch_pll(intel_crtc);
intel_disable_shared_dpll(intel_crtc);
 
ironlake_fdi_pll_disable(intel_crtc);
}
 
intel_crtc->active = false;
intel_update_watermarks(dev);
3534,22 → 3517,23
encoder->disable(encoder);
 
 
/* FBC must be disabled before disabling the plane on HSW. */
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
hsw_disable_ips(intel_crtc);
 
// intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_plane(dev_priv, plane, pipe);
 
if (dev_priv->cfb_plane == plane)
intel_disable_fbc(dev);
 
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
intel_disable_pipe(dev_priv, pipe);
 
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
/* XXX: Once we have proper panel fitter state tracking implemented with
* hardware state read/check support we should switch to only disable
* the panel fitter when we know it's used. */
if (intel_using_power_well(dev)) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
}
ironlake_pfit_disable(intel_crtc);
 
intel_ddi_disable_pipe_clock(intel_crtc);
 
3559,6 → 3543,7
 
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_ddi_fdi_disable(crtc);
}
 
3573,17 → 3558,11
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_put_pch_pll(intel_crtc);
intel_put_shared_dpll(intel_crtc);
}
 
static void haswell_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
* start using it. */
intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
 
intel_ddi_put_crtc_pll(crtc);
}
 
3629,8 → 3608,32
}
}
 
static void i9xx_crtc_enable(struct drm_crtc *crtc)
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_config *pipe_config = &crtc->config;
 
if (!crtc->config.gmch_pfit.control)
return;
 
/*
* The panel fitter should only be adjusted whilst the pipe is disabled,
* according to register description and PRM.
*/
WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
assert_pipe_disabled(dev_priv, crtc->pipe);
 
I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
 
/* Border color in case we don't scale up to the full screen. Black by
* default, change to something else for debugging. */
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
 
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3646,24 → 3649,71
intel_crtc->active = true;
intel_update_watermarks(dev);
 
intel_enable_pll(dev_priv, pipe);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
vlv_enable_pll(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
i9xx_pfit_enable(intel_crtc);
 
intel_crtc_load_lut(crtc);
 
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
// intel_crtc_update_cursor(crtc, true);
 
intel_update_fbc(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
}
 
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
WARN_ON(!crtc->enabled);
 
if (intel_crtc->active)
return;
 
intel_crtc->active = true;
intel_update_watermarks(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
i9xx_enable_pll(intel_crtc);
 
i9xx_pfit_enable(intel_crtc);
 
intel_crtc_load_lut(crtc);
 
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
/* The fixup needs to happen before cursor is enabled */
if (IS_G4X(dev))
g4x_fixup_plane(dev_priv, pipe);
// intel_crtc_update_cursor(crtc, true);
 
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
 
/* Give the overlay scaler a chance to enable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, true);
// intel_crtc_update_cursor(crtc, true);
 
intel_update_fbc(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
}
3672,21 → 3722,16
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
uint32_t pctl = I915_READ(PFIT_CONTROL);
 
if (!crtc->config.gmch_pfit.control)
return;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
if (INTEL_INFO(dev)->gen >= 4)
pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT;
else
pipe = PIPE_B;
 
if (pipe == crtc->pipe) {
DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl);
DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
I915_READ(PFIT_CONTROL));
I915_WRITE(PFIT_CONTROL, 0);
}
}
 
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
3706,19 → 3751,25
/* Give the overlay scaler a chance to disable if it's on this pipe */
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
intel_crtc_dpms_overlay(intel_crtc, false);
// intel_crtc_update_cursor(crtc, false);
 
if (dev_priv->cfb_plane == plane)
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
intel_crtc_dpms_overlay(intel_crtc, false);
// intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_plane(dev_priv, plane, pipe);
 
intel_disable_pipe(dev_priv, pipe);
 
i9xx_pfit_disable(intel_crtc);
 
intel_disable_pll(dev_priv, pipe);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
i9xx_disable_pll(dev_priv, pipe);
 
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
3793,8 → 3844,8
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
 
dev_priv->display.crtc_disable(crtc);
intel_crtc->eld_vld = false;
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
 
3821,16 → 3872,6
}
}
 
void intel_modeset_disable(struct drm_device *dev)
{
struct drm_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->enabled)
intel_crtc_disable(crtc);
}
}
 
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3839,10 → 3880,10
kfree(intel_encoder);
}
 
/* Simple dpms helper for encodres with just one connector, no cloning and only
/* Simple dpms helper for encoders with just one connector, no cloning and only
* one kind of off state. It clamps all !ON modes to fully OFF and changes the
* state of the entire output pipe. */
void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
if (mode == DRM_MODE_DPMS_ON) {
encoder->connectors_active = true;
3894,8 → 3935,6
* consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
struct intel_encoder *encoder = intel_attached_encoder(connector);
 
/* All the simple cases only support two dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
3906,10 → 3945,8
connector->dpms = mode;
 
/* Only need to change hw state when actually enabled */
if (encoder->base.crtc)
intel_encoder_dpms(encoder, mode);
else
WARN_ON(encoder->connectors_active != false);
if (connector->encoder)
intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
 
intel_modeset_check_state(connector->dev);
}
3925,31 → 3962,139
return encoder->get_hw_state(encoder, &pipe);
}
 
static bool intel_crtc_compute_config(struct drm_crtc *crtc,
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
 
DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
 
if (IS_HASWELL(dev)) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
return false;
} else {
return true;
}
}
 
if (INTEL_INFO(dev)->num_pipes == 2)
return true;
 
/* Ivybridge 3 pipe is really complicated */
switch (pipe) {
case PIPE_A:
return true;
case PIPE_B:
if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
return true;
case PIPE_C:
if (!pipe_has_enabled_pch(pipe_B_crtc) ||
pipe_B_crtc->config.fdi_lanes <= 2) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
} else {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
}
return true;
default:
BUG();
}
}
 
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int lane, link_bw, fdi_dotclock;
bool setup_ok, needs_recompute = false;
 
retry:
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
* mode pixel clock is stored in units of 1KHz.
* Hence the bw of each lane in terms of the mode signal
* is:
*/
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 
fdi_dotclock = adjusted_mode->clock;
fdi_dotclock /= pipe_config->pixel_multiplier;
 
lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
pipe_config->pipe_bpp);
 
pipe_config->fdi_lanes = lane;
 
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n);
 
setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
intel_crtc->pipe, pipe_config);
if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
pipe_config->pipe_bpp);
needs_recompute = true;
pipe_config->bw_constrained = true;
 
goto retry;
}
 
if (needs_recompute)
return RETRY;
 
return setup_ok ? 0 : -EINVAL;
}
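/* Editor's illustrative arithmetic, not driver code: as the comment above
 * says, FDI is a ~2.7 Gbit/s per-lane signal with 10 bits per octet, so the
 * usable per-lane bandwidth is the link frequency divided by 10, and the
 * lane count follows from dotclock * bpp. The helpers below are a rough,
 * self-contained version of that calculation and of the bpp-reduction retry;
 * the exact rounding/margin used by the driver is omitted. */
#if 0
static int fdi_lanes_needed(int dotclock_khz, int link_bw_khz, int bpp)
{
	/* pixel bits per second, spread over 8 data bits per symbol per lane;
	 * "+ 1" is a crude round-up placeholder, not the driver's margin */
	long long bps = (long long)dotclock_khz * bpp;

	return (int)(bps / ((long long)link_bw_khz * 8)) + 1;
}

static int pick_pipe_bpp(int dotclock_khz, int link_bw_khz, int bpp, int max_lanes)
{
	/* mirror the retry above: drop one bpc step (2*3 bits) until it fits
	 * or we reach 6 bpc */
	while (bpp > 6 * 3 &&
	       fdi_lanes_needed(dotclock_khz, link_bw_khz, bpp) > max_lanes)
		bpp -= 2 * 3;

	return bpp;
}
#endif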
 
static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
pipe_config->ips_enabled = i915_enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config->pipe_bpp <= 24;
}
 
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
if (pipe_config->requested_mode.clock * 3
> IRONLAKE_FDI_FREQ * 4)
return false;
return -EINVAL;
}
 
/* All interlaced capable intel hw wants timings in frames. Note though
* that intel_lvds_mode_fixup does some funny tricks with the crtc
* timings, so we need to be careful not to clobber these. */
if (!pipe_config->timings_set)
drm_mode_set_crtcinfo(adjusted_mode, 0);
 
/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
* with a hsync front porch of 0.
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
adjusted_mode->hsync_start == adjusted_mode->hdisplay)
return false;
return -EINVAL;
 
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
3959,7 → 4104,18
pipe_config->pipe_bpp = 8*3;
}
 
return true;
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
 
/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
* clock survives for now. */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
pipe_config->shared_dpll = crtc->config.shared_dpll;
 
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
 
return 0;
}
 
static int valleyview_get_display_clock_speed(struct drm_device *dev)
3982,6 → 4138,30
return 200000;
}
 
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
 
pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
 
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_267_MHZ_PNV:
return 267000;
case GC_DISPLAY_CLOCK_333_MHZ_PNV:
return 333000;
case GC_DISPLAY_CLOCK_444_MHZ_PNV:
return 444000;
case GC_DISPLAY_CLOCK_200_MHZ_PNV:
return 200000;
default:
DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
return 133000;
case GC_DISPLAY_CLOCK_167_MHZ_PNV:
return 167000;
}
}
 
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
4068,7 → 4248,7
{
if (i915_panel_use_ssc >= 0)
return i915_panel_use_ssc != 0;
return dev_priv->lvds_use_ssc
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
 
4104,7 → 4284,7
refclk = vlv_get_refclk(crtc);
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
} else if (!IS_GEN2(dev)) {
4116,28 → 4296,14
return refclk;
}
 
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc)
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
unsigned dotclock = crtc->config.adjusted_mode.clock;
struct dpll *clock = &crtc->config.dpll;
 
/* SDVO TV has fixed PLL values depend on its clock range,
this mirrors vbios setting. */
if (dotclock >= 100000 && dotclock < 140500) {
clock->p1 = 2;
clock->p2 = 10;
clock->n = 3;
clock->m1 = 16;
clock->m2 = 8;
} else if (dotclock >= 140500 && dotclock <= 200000) {
clock->p1 = 1;
clock->p2 = 10;
clock->n = 6;
clock->m1 = 12;
clock->m2 = 8;
return (1 << dpll->n) << 16 | dpll->m2;
}
 
crtc->config.clock_set = true;
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
 
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4147,32 → 4313,94
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
u32 fp, fp2 = 0;
struct dpll *clock = &crtc->config.dpll;
 
if (IS_PINEVIEW(dev)) {
fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
fp = pnv_dpll_compute_fp(&crtc->config.dpll);
if (reduced_clock)
fp2 = (1 << reduced_clock->n) << 16 |
reduced_clock->m1 << 8 | reduced_clock->m2;
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
if (reduced_clock)
fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
reduced_clock->m2;
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
 
I915_WRITE(FP0(pipe), fp);
crtc->config.dpll_hw_state.fp0 = fp;
 
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
I915_WRITE(FP1(pipe), fp);
crtc->config.dpll_hw_state.fp1 = fp;
}
}
 
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
{
u32 reg_val;
 
/*
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
 
reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
reg_val &= 0x8cffffff;
reg_val = 0x8c000000;
vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
 
reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
reg_val &= 0xffffff00;
vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
 
reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
}
 
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
 
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config.cpu_transcoder;
 
if (INTEL_INFO(dev)->gen >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
} else {
I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
}
}
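/* Editor's note, illustrative only and not the driver's helper: the M/N
 * values written above are ratios -- data M/N compares the pixel data rate
 * against the total link capacity, link M/N compares the pixel clock against
 * the link clock -- reduced so both fit their register fields (assumed here
 * to be 24 bits wide). A simplified sketch of deriving such a pair: */
#if 0
#include <stdint.h>

struct mn { uint32_t m, n; };

static struct mn compute_ratio(uint64_t m, uint64_t n)
{
	struct mn r;

	/* scale both terms down until they fit the assumed 24-bit field */
	while (m >= (1 << 24) || n >= (1 << 24)) {
		m >>= 1;
		n >>= 1;
	}
	r.m = (uint32_t)m;
	r.n = (uint32_t)n;
	return r;
}

/* data M/N: pixel bits per clock vs. 8 data bits per lane per link clock */
static struct mn data_mn(int bpp, int nlanes, int pixel_khz, int link_khz)
{
	return compute_ratio((uint64_t)bpp * pixel_khz,
			     (uint64_t)nlanes * link_khz * 8);
}
#endif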
 
static void intel_dp_set_m_n(struct intel_crtc *crtc)
{
if (crtc->config.has_pch_encoder)
4186,24 → 4414,12
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
u32 dpll, mdiv, pdiv;
u32 dpll, mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
bool is_sdvo;
u32 temp;
u32 coreclk, reg_val, dpll_md;
 
mutex_lock(&dev_priv->dpio_lock);
 
is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
 
dpll = DPLL_VGA_MODE_DIS;
dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
dpll |= DPLL_REFA_CLK_ENABLE_VLV;
dpll |= DPLL_INTEGRATED_CLOCK_VLV;
 
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
 
bestn = crtc->config.dpll.n;
bestm1 = crtc->config.dpll.m1;
bestm2 = crtc->config.dpll.m2;
4210,72 → 4426,94
bestp1 = crtc->config.dpll.p1;
bestp2 = crtc->config.dpll.p2;
 
/*
* In Valleyview PLL and program lane counter registers are exposed
* through DPIO interface
*/
/* See eDP HDMI DPIO driver vbios notes doc */
 
/* PLL B needs special handling */
if (pipe)
vlv_pllb_recal_opamp(dev_priv);
 
/* Set up Tx target for periodic Rcomp update */
vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
 
/* Disable target IRef on PLL */
reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
reg_val &= 0x00ffffff;
vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
 
/* Disable fast lock */
vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
 
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
mdiv |= ((bestn << DPIO_N_SHIFT));
mdiv |= (1 << DPIO_POST_DIV_SHIFT);
mdiv |= (1 << DPIO_K_SHIFT);
mdiv |= DPIO_ENABLE_CALIBRATION;
intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
 
intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
/*
* Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
* but we don't support that).
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
 
pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
(5 << DPIO_CLK_BIAS_CTL_SHIFT);
intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
 
intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
/* Set HBR and RBR LPF coefficients */
if (crtc->config.port_clock == 162000 ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
0x00d0000f);
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
if (!pipe)
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
0x0df40000);
else
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (!pipe)
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
0x0df70000);
else
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
0x0df40000);
}
 
intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
 
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
 
I915_WRITE(DPLL(pipe), dpll);
/* Enable DPIO clock input */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
if (pipe)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
dpll |= DPLL_VCO_ENABLE;
crtc->config.dpll_hw_state.dpll = dpll;
 
temp = 0;
if (is_sdvo) {
temp = 0;
if (crtc->config.pixel_multiplier > 1) {
temp = (crtc->config.pixel_multiplier - 1)
dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
}
I915_WRITE(DPLL_MD(pipe), temp);
POSTING_READ(DPLL_MD(pipe));
crtc->config.dpll_hw_state.dpll_md = dpll_md;
 
/* Now program lane control registers */
if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)
|| intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
temp = 0x1000C4;
if(pipe == 1)
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
}
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
 
if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) {
temp = 0x1000C4;
if(pipe == 1)
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
}
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
4285,8 → 4523,6
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
int pipe = crtc->pipe;
u32 dpll;
bool is_sdvo;
struct dpll *clock = &crtc->config.dpll;
4303,16 → 4539,16
else
dpll |= DPLLB_MODE_DAC_SERIAL;
 
if (is_sdvo) {
if ((crtc->config.pixel_multiplier > 1) &&
(IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
dpll |= (crtc->config.pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
 
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
dpll |= DPLL_DVO_HIGH_SPEED;
dpll |= DPLL_SDVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
if (IS_PINEVIEW(dev))
4339,12 → 4575,8
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
if (crtc->config.sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4352,52 → 4584,24
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
udelay(150);
crtc->config.dpll_hw_state.dpll = dpll;
 
for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
if (INTEL_INFO(dev)->gen >= 4) {
u32 dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc->config.dpll_hw_state.dpll_md = dpll_md;
}
 
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
 
I915_WRITE(DPLL(pipe), dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
 
if (INTEL_INFO(dev)->gen >= 4) {
u32 temp = 0;
if (is_sdvo) {
temp = 0;
if (crtc->config.pixel_multiplier > 1) {
temp = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(DPLL(pipe), dpll);
}
}
 
static void i8xx_update_pll(struct intel_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
int pipe = crtc->pipe;
u32 dpll;
struct dpll *clock = &crtc->config.dpll;
 
4416,6 → 4620,9
dpll |= PLL_P2_DIVIDE_BY_4;
}
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4423,42 → 4630,29
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
udelay(150);
 
for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
I915_WRITE(DPLL(pipe), dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
 
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(DPLL(pipe), dpll);
crtc->config.dpll_hw_state.dpll = dpll;
}
 
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
uint32_t vsyncshift;
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
 
/* We need to be careful not to change the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
crtc_vtotal = adjusted_mode->crtc_vtotal;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
 
if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
vsyncshift = adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal / 2;
} else {
4480,10 → 4674,10
 
I915_WRITE(VTOTAL(cpu_transcoder),
(adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
((crtc_vtotal - 1) << 16));
I915_WRITE(VBLANK(cpu_transcoder),
(adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
((crtc_vblank_end - 1) << 16));
I915_WRITE(VSYNC(cpu_transcoder),
(adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
4503,6 → 4697,66
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
}
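/* Editor's sketch, not driver code: the timing registers written above all
 * use the same "value minus one" packing -- one period in the low 16 bits,
 * the other in the high 16 bits -- which is why the readout path below adds
 * 1 back to every field. Self-contained helpers showing that encoding: */
#if 0
#include <stdint.h>

static uint32_t pack_timing(uint32_t low_period, uint32_t high_period)
{
	return (low_period - 1) | ((high_period - 1) << 16);
}

static void unpack_timing(uint32_t reg, uint32_t *low_period, uint32_t *high_period)
{
	*low_period = (reg & 0xffff) + 1;
	*high_period = ((reg >> 16) & 0xffff) + 1;
}
#endif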
 
static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
uint32_t tmp;
 
tmp = I915_READ(HTOTAL(cpu_transcoder));
pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HBLANK(cpu_transcoder));
pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HSYNC(cpu_transcoder));
pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
 
tmp = I915_READ(VTOTAL(cpu_transcoder));
pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VBLANK(cpu_transcoder));
pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VSYNC(cpu_transcoder));
pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
 
if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
pipe_config->adjusted_mode.crtc_vtotal += 1;
pipe_config->adjusted_mode.crtc_vblank_end += 1;
}
 
tmp = I915_READ(PIPESRC(crtc->pipe));
pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
}
 
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_crtc *crtc = &intel_crtc->base;
 
crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
 
crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
 
crtc->mode.flags = pipe_config->adjusted_mode.flags;
 
crtc->mode.clock = pipe_config->adjusted_mode.clock;
crtc->mode.flags |= pipe_config->adjusted_mode.flags;
}
 
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
4509,8 → 4763,12
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pipeconf;
 
pipeconf = I915_READ(PIPECONF(intel_crtc->pipe));
pipeconf = 0;
 
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
pipeconf |= PIPECONF_ENABLE;
 
if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
* core speed.
4521,26 → 4779,28
if (intel_crtc->config.requested_mode.clock >
dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
pipeconf |= PIPECONF_DOUBLE_WIDE;
else
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
 
/* default to 8bpc */
pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
if (intel_crtc->config.has_dp_encoder) {
if (intel_crtc->config.dither) {
pipeconf |= PIPECONF_6BPC |
PIPECONF_DITHER_EN |
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
}
 
if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base,
INTEL_OUTPUT_EDP)) {
if (intel_crtc->config.dither) {
pipeconf |= PIPECONF_6BPC |
PIPECONF_ENABLE |
I965_PIPECONF_ACTIVE;
switch (intel_crtc->config.pipe_bpp) {
case 18:
pipeconf |= PIPECONF_6BPC;
break;
case 24:
pipeconf |= PIPECONF_8BPC;
break;
case 30:
pipeconf |= PIPECONF_10BPC;
break;
default:
/* Case prevented by intel_choose_pipe_bpp_dither. */
BUG();
}
}
 
4550,11 → 4810,9
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
} else {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
 
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (!IS_GEN2(dev) &&
intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4561,12 → 4819,8
else
pipeconf |= PIPECONF_PROGRESSIVE;
 
if (IS_VALLEYVIEW(dev)) {
if (intel_crtc->config.limited_color_range)
if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
else
pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT;
}
 
I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(intel_crtc->pipe));
4579,8 → 4833,6
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
4587,8 → 4839,8
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dspcntr;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false;
bool ok, has_reduced_clock = false;
bool is_lvds = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
4598,15 → 4850,6
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
}
 
num_connectors++;
4620,16 → 4863,14
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
&clock);
if (!ok) {
ok = dev_priv->display.find_dpll(limit, crtc,
intel_crtc->config.port_clock,
refclk, NULL, &clock);
if (!ok && !intel_crtc->config.clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
* Ensure we match the reduced clock's P to the target clock.
4637,10 → 4878,10
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
*/
has_reduced_clock = limit->find_pll(limit, crtc,
has_reduced_clock =
dev_priv->display.find_dpll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&clock,
refclk, &clock,
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
4652,11 → 4893,8
intel_crtc->config.dpll.p2 = clock.p2;
}
 
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(intel_crtc);
 
if (IS_GEN2(dev))
i8xx_update_pll(intel_crtc, adjusted_mode,
i8xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
else if (IS_VALLEYVIEW(dev))
4676,11 → 4914,8
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
 
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
intel_set_pipe_timings(intel_crtc);
 
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
4691,10 → 4926,6
 
i9xx_set_pipeconf(intel_crtc);
 
intel_enable_pipe(dev_priv, pipe, false);
 
intel_wait_for_vblank(dev, pipe);
 
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
 
4705,6 → 4936,33
return ret;
}
 
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
tmp = I915_READ(PFIT_CONTROL);
if (!(tmp & PFIT_ENABLE))
return;
 
/* Check whether the pfit is attached to our pipe. */
if (INTEL_INFO(dev)->gen < 4) {
if (crtc->pipe != PIPE_B)
return;
} else {
if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
return;
}
 
pipe_config->gmch_pfit.control = tmp;
pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =
I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
 
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
4712,10 → 4970,45
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
return false;
 
intel_get_pipe_timings(crtc, pipe_config);
 
i9xx_get_pfit_config(crtc, pipe_config);
 
if (INTEL_INFO(dev)->gen >= 4) {
tmp = I915_READ(DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
>> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
} else {
/* Note that on i915G/GM the pixel multiplier is in the sdvo
* port and will be fixed up in the encoder->get_config
* function. */
pipe_config->pixel_multiplier = 1;
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev)) {
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
DPLL_PORTC_READY_MASK |
DPLL_PORTB_READY_MASK);
}
 
return true;
}
 
4727,7 → 5020,6
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_pch_edp = false;
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
4742,9 → 5034,7
break;
case INTEL_OUTPUT_EDP:
has_panel = true;
if (intel_encoder_is_pch_edp(&encoder->base))
has_pch_edp = true;
else
if (enc_to_dig_port(&encoder->base)->port == PORT_A)
has_cpu_edp = true;
break;
}
4751,7 → 5041,7
}
 
if (HAS_PCH_IBX(dev)) {
has_ck505 = dev_priv->display_clock_mode;
has_ck505 = dev_priv->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
has_ck505 = false;
4758,9 → 5048,8
can_ssc = true;
}
 
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
has_panel, has_lvds, has_pch_edp, has_cpu_edp,
has_ck505);
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
has_panel, has_lvds, has_ck505);
 
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
4872,45 → 5161,10
BUG_ON(val != final);
}
 
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
static void lpt_init_pch_refclk(struct drm_device *dev)
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
bool has_vga = false;
bool is_sdv = false;
u32 tmp;
uint32_t tmp;
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
break;
}
}
 
if (!has_vga)
return;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* XXX: Rip out SDV support once Haswell ships for real. */
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
is_sdv = true;
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
tmp |= SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
udelay(24);
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
if (!is_sdv) {
tmp = I915_READ(SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
4924,22 → 5178,20
I915_WRITE(SOUTH_CHICKEN2, tmp);
 
if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
100))
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
 
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
uint32_t tmp;
 
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
 
if (is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
tmp |= 0x7FFF;
intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
}
 
tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
4948,24 → 5200,6
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
 
if (is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
tmp |= (0x3F << 8);
intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
tmp |= (0x3F << 8);
intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
}
 
tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
4974,7 → 5208,6
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
 
if (!is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
4984,7 → 5217,6
tmp &= ~(7 << 13);
tmp |= (5 << 13);
intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
}
 
tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
5006,7 → 5238,6
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
 
if (!is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
tmp |= (1 << 27);
intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5026,14 → 5257,101
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
 
/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
tmp |= SBI_DBUFF0_ENABLE;
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
/* Implements 3 different sequences from BSpec chapter "Display iCLK
* Programming" based on the parameters passed:
* - Sequence to enable CLKOUT_DP
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
bool with_fdi)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg, tmp;
 
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
 
mutex_lock(&dev_priv->dpio_lock);
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
tmp |= SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
udelay(24);
 
if (with_spread) {
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
if (with_fdi) {
lpt_reset_fdi_mphy(dev_priv);
lpt_program_fdi_mphy(dev_priv);
}
}
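/* Presumably the LP ("ULT") PCH exposes the buffer-enable bit through SBI_GEN0
 * while other LPT parts use SBI_DBUFF0; both are written over the same iCLK
 * sideband interface below. */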
 
reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg, tmp;
 
mutex_lock(&dev_priv->dpio_lock);
 
reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
if (!(tmp & SBI_SSCCTL_DISABLE)) {
if (!(tmp & SBI_SSCCTL_PATHALT)) {
tmp |= SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(32);
}
tmp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
}
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void lpt_init_pch_refclk(struct drm_device *dev)
{
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
bool has_vga = false;
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
break;
}
}
 
if (has_vga)
lpt_enable_clkout_dp(dev, true, true);
else
lpt_disable_clkout_dp(dev);
}
 
/*
* Initialize reference clocks when the driver loads
*/
5050,7 → 5368,6
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
struct intel_encoder *edp_encoder = NULL;
int num_connectors = 0;
bool is_lvds = false;
 
5059,9 → 5376,6
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_EDP:
edp_encoder = encoder;
break;
}
num_connectors++;
}
5068,16 → 5382,14
 
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
dev_priv->lvds_ssc_freq);
return dev_priv->lvds_ssc_freq * 1000;
dev_priv->vbt.lvds_ssc_freq);
return dev_priv->vbt.lvds_ssc_freq * 1000;
}
 
return 120000;
}
 
static void ironlake_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5084,9 → 5396,8
int pipe = intel_crtc->pipe;
uint32_t val;
 
val = I915_READ(PIPECONF(pipe));
val = 0;
 
val &= ~PIPECONF_BPC_MASK;
switch (intel_crtc->config.pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
5105,12 → 5416,10
BUG();
}
 
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
if (dither)
if (intel_crtc->config.dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
val &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
5117,8 → 5426,6
 
if (intel_crtc->config.limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
else
val &= ~PIPECONF_COLOR_RANGE_SELECT;
 
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
5188,9 → 5495,7
}
}
 
static void haswell_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5197,14 → 5502,12
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
uint32_t val;
 
val = I915_READ(PIPECONF(cpu_transcoder));
val = 0;
 
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
if (dither)
if (intel_crtc->config.dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
val &= ~PIPECONF_INTERLACE_MASK_HSW;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
5211,10 → 5514,12
 
I915_WRITE(PIPECONF(cpu_transcoder), val);
POSTING_READ(PIPECONF(cpu_transcoder));
 
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
}
 
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
bool *has_reduced_clock,
intel_clock_t *reduced_clock)
5224,7 → 5529,7
struct intel_encoder *intel_encoder;
int refclk;
const intel_limit_t *limit;
bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
bool ret, is_lvds = false;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
switch (intel_encoder->type) {
5231,15 → 5536,6
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
}
}
 
5251,8 → 5547,9
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
clock);
ret = dev_priv->display.find_dpll(limit, crtc,
to_intel_crtc(crtc)->config.port_clock,
refclk, NULL, clock);
if (!ret)
return false;
 
5263,16 → 5560,13
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
*/
*has_reduced_clock = limit->find_pll(limit, crtc,
*has_reduced_clock =
dev_priv->display.find_dpll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
clock,
refclk, clock,
reduced_clock);
}
 
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc));
 
return true;
}
 
5294,65 → 5588,25
POSTING_READ(SOUTH_CHICKEN1);
}
 
static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
 
DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
if (intel_crtc->fdi_lanes > 4) {
DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
/* Clamp lanes to avoid programming the hw with bogus values. */
intel_crtc->fdi_lanes = 4;
 
return false;
}
 
if (INTEL_INFO(dev)->num_pipes == 2)
return true;
 
switch (intel_crtc->pipe) {
case PIPE_A:
return true;
break;
case PIPE_B:
if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
intel_crtc->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
/* Clamp lanes to avoid programming the hw with bogus values. */
intel_crtc->fdi_lanes = 2;
 
return false;
}
 
if (intel_crtc->fdi_lanes > 2)
if (intel_crtc->config.fdi_lanes > 2)
WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
else
cpt_enable_fdi_bc_bifurcation(dev);
 
return true;
break;
case PIPE_C:
if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
if (intel_crtc->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
/* Clamp lanes to avoid programming the hw with bogus values. */
intel_crtc->fdi_lanes = 2;
 
return false;
}
} else {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
}
 
cpt_enable_fdi_bc_bifurcation(dev);
 
return true;
break;
default:
BUG();
}
5369,78 → 5623,13
return bps / (link_bw * 8) + 1;
}
 
void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n);
I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m);
I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n);
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
 
void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config.cpu_transcoder;
 
if (INTEL_INFO(dev)->gen >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
} else {
I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n);
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m);
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n);
}
}
 
static void ironlake_fdi_set_m_n(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct intel_link_m_n m_n = {0};
int target_clock, lane, link_bw;
 
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
* mode pixel clock is stored in units of 1KHz.
* Hence the bw of each lane in terms of the mode signal
* is:
*/
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 
if (intel_crtc->config.pixel_target_clock)
target_clock = intel_crtc->config.pixel_target_clock;
else
target_clock = adjusted_mode->clock;
 
lane = ironlake_get_lanes_required(target_clock, link_bw,
intel_crtc->config.pipe_bpp);
 
intel_crtc->fdi_lanes = lane;
 
if (intel_crtc->config.pixel_multiplier > 1)
link_bw *= intel_crtc->config.pixel_multiplier;
intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock,
link_bw, &m_n);
 
intel_cpu_transcoder_set_m_n(intel_crtc, &m_n);
}
 
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
intel_clock_t *clock, u32 *fp,
u32 *fp,
intel_clock_t *reduced_clock, u32 *fp2)
{
struct drm_crtc *crtc = &intel_crtc->base;
5449,7 → 5638,7
struct intel_encoder *intel_encoder;
uint32_t dpll;
int factor, num_connectors = 0;
bool is_lvds = false, is_sdvo = false, is_tv = false;
bool is_lvds = false, is_sdvo = false;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
switch (intel_encoder->type) {
5459,12 → 5648,7
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
}
 
num_connectors++;
5474,13 → 5658,13
factor = 21;
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->lvds_ssc_freq == 100) ||
dev_priv->vbt.lvds_ssc_freq == 100) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (is_sdvo && is_tv)
} else if (intel_crtc->config.sdvo_tv_clock)
factor = 20;
 
if (clock->m < factor * clock->n)
if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
*fp |= FP_CB_TUNE;
 
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
5492,23 → 5676,21
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
if (intel_crtc->config.pixel_multiplier > 1) {
 
dpll |= (intel_crtc->config.pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
if (intel_crtc->config.has_dp_encoder &&
intel_crtc->config.has_pch_encoder)
dpll |= DPLL_DVO_HIGH_SPEED;
 
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_crtc->config.has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
switch (clock->p2) {
switch (intel_crtc->config.dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
5523,18 → 5705,12
break;
}
 
if (is_sdvo && is_tv)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (is_tv)
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
 
return dpll;
return dpll | DPLL_VCO_ENABLE;
}
 
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5544,19 → 5720,16
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, fp = 0, fp2 = 0;
u32 dpll = 0, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
bool is_lvds = false;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
int ret;
bool dither, fdi_config_ok;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
5571,11 → 5744,9
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 
intel_crtc->config.cpu_transcoder = pipe;
 
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
ok = ironlake_compute_clocks(crtc, &clock,
&has_reduced_clock, &reduced_clock);
if (!ok) {
if (!ok && !intel_crtc->config.clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
5588,84 → 5759,57
intel_crtc->config.dpll.p2 = clock.p2;
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
/* determine panel color depth */
dither = intel_crtc->config.dither;
if (is_lvds && dev_priv->lvds_dither)
dither = true;
 
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (intel_crtc->config.has_pch_encoder) {
fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
 
dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock,
dpll = ironlake_compute_dpll(intel_crtc,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
 
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
intel_crtc->config.dpll_hw_state.dpll = dpll;
intel_crtc->config.dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
intel_crtc->config.dpll_hw_state.fp1 = fp2;
else
intel_crtc->config.dpll_hw_state.fp1 = fp;
 
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (intel_crtc->config.has_pch_encoder) {
struct intel_pch_pll *pll;
 
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
pipe);
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(pipe));
return -EINVAL;
}
} else
intel_put_pch_pll(intel_crtc);
intel_put_shared_dpll(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
if (is_lvds && has_reduced_clock && i915_powersave)
intel_crtc->lowfreq_avail = true;
else
intel_crtc->lowfreq_avail = false;
 
if (intel_crtc->pch_pll) {
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
if (intel_crtc->config.has_pch_encoder) {
pll = intel_crtc_to_shared_dpll(intel_crtc);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(intel_crtc->pch_pll->pll_reg);
udelay(150);
 
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
}
 
intel_crtc->lowfreq_avail = false;
if (intel_crtc->pch_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
} else {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
}
}
 
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
if (IS_IVYBRIDGE(dev))
ivybridge_update_fdi_bc_bifurcation(intel_crtc);
 
/* Note, this also computes intel_crtc->fdi_lanes which is used below in
* ironlake_check_fdi_lanes. */
intel_crtc->fdi_lanes = 0;
if (intel_crtc->config.has_pch_encoder)
ironlake_fdi_set_m_n(crtc);
ironlake_set_pipeconf(crtc);
 
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
 
ironlake_set_pipeconf(crtc, adjusted_mode, dither);
 
intel_wait_for_vblank(dev, pipe);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
5674,11 → 5818,49
 
intel_update_watermarks(dev);
 
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
return ret;
}
 
return fdi_config_ok ? ret : -EINVAL;
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder transcoder = pipe_config->cpu_transcoder;
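/* The DATA_M1 register appears to pack the transfer-unit size, stored as the
 * value minus one, in its upper bits; gmch_m masks those bits out and tu adds
 * the one back after shifting. */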
 
pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
& ~TU_SIZE_MASK;
pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
 
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
tmp = I915_READ(PF_CTL(crtc->pipe));
 
if (tmp & PF_ENABLE) {
pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
 
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now. */
if (IS_GEN7(dev)) {
WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
PF_PIPE_SEL_IVB(crtc->pipe));
}
}
}
 
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
5686,114 → 5868,399
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
return false;
 
if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE)
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
 
pipe_config->has_pch_encoder = true;
 
tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
ironlake_get_fdi_m_n_config(crtc, pipe_config);
 
if (HAS_PCH_IBX(dev_priv->dev)) {
pipe_config->shared_dpll =
(enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
else
pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
}
 
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
 
WARN_ON(!pll->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
 
tmp = pipe_config->dpll_hw_state.dpll;
pipe_config->pixel_multiplier =
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
 
intel_get_pipe_timings(crtc, pipe_config);
 
ironlake_get_pfit_config(crtc, pipe_config);
 
return true;
}
 
static void haswell_modeset_global_resources(struct drm_device *dev)
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool enable = false;
struct drm_device *dev = dev_priv->dev;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
unsigned long irqflags;
uint32_t val;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
if (crtc->pipe != PIPE_A && crtc->base.enabled)
enable = true;
/* XXX: Should check for edp transcoder here, but thanks to init
* sequence that's not yet available. Just in case desktop eDP
* on PORT D is possible on haswell, too. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
 
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
WARN(plls->spll_refcount, "SPLL enabled\n");
WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
val = I915_READ(DEIMR);
WARN((val & ~DE_PCH_EVENT_IVB) != val,
"Unexpected DEIMR bits enabled: 0x%x\n", val);
val = I915_READ(SDEIMR);
WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
"Unexpected SDEIMR bits enabled: 0x%x\n", val);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (encoder->type != INTEL_OUTPUT_EDP &&
encoder->connectors_active)
enable = true;
/*
* This function implements pieces of two sequences from BSpec:
* - Sequence for display software to disable LCPLL
* - Sequence for display software to allow package C8+
* The steps implemented here are just the steps that actually touch the LCPLL
* register. Callers should take care of disabling all the display engine
* functions, doing the mode unset, fixing interrupts, etc.
*/
void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down)
{
uint32_t val;
 
assert_can_disable_lcpll(dev_priv);
 
val = I915_READ(LCPLL_CTL);
 
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
 
if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
DRM_ERROR("Switching to FCLK failed\n");
 
val = I915_READ(LCPLL_CTL);
}
 
/* Even the eDP panel fitter is outside the always-on well. */
if (dev_priv->pch_pf_size)
enable = true;
val |= LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
 
intel_set_power_well(dev, enable);
if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
DRM_ERROR("LCPLL still locked\n");
 
val = I915_READ(D_COMP);
val |= D_COMP_COMP_DISABLE;
I915_WRITE(D_COMP, val);
POSTING_READ(D_COMP);
udelay(100);
 
if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
DRM_ERROR("D_COMP RCOMP still in progress\n");
 
if (allow_power_down) {
val = I915_READ(LCPLL_CTL);
val |= LCPLL_POWER_DOWN_ALLOW;
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
}
}
 
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
/*
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
* source.
*/
void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
bool is_cpu_edp = false;
struct intel_encoder *encoder;
int ret;
bool dither;
uint32_t val;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&encoder->base))
is_cpu_edp = true;
break;
val = I915_READ(LCPLL_CTL);
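/* The check below bails out early when LCPLL is already fully enabled:
 * locked, not disabled, running from the LCPLL source and with power-down
 * not allowed. */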
 
if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
return;
 
/* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
dev_priv->uncore.funcs.force_wake_get(dev_priv);
 
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
}
 
num_connectors++;
val = I915_READ(D_COMP);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
I915_WRITE(D_COMP, val);
POSTING_READ(D_COMP);
 
val = I915_READ(LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
 
if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
DRM_ERROR("LCPLL not locked yet\n");
 
if (val & LCPLL_CD_SOURCE_FCLK) {
val = I915_READ(LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
 
if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
}
 
if (is_cpu_edp)
intel_crtc->config.cpu_transcoder = TRANSCODER_EDP;
dev_priv->uncore.funcs.force_wake_put(dev_priv);
}
 
void hsw_enable_pc8_work(struct work_struct *__work)
{
struct drm_i915_private *dev_priv =
container_of(to_delayed_work(__work), struct drm_i915_private,
pc8.enable_work);
struct drm_device *dev = dev_priv->dev;
uint32_t val;
 
if (dev_priv->pc8.enabled)
return;
 
DRM_DEBUG_KMS("Enabling package C8+\n");
 
dev_priv->pc8.enabled = true;
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
 
lpt_disable_clkout_dp(dev);
hsw_pc8_disable_interrupts(dev);
hsw_disable_lcpll(dev_priv, true, true);
}
 
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
WARN(dev_priv->pc8.disable_count < 1,
"pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
 
dev_priv->pc8.disable_count--;
if (dev_priv->pc8.disable_count != 0)
return;
 
schedule_delayed_work(&dev_priv->pc8.enable_work,
msecs_to_jiffies(i915_pc8_timeout));
}
 
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t val;
 
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
WARN(dev_priv->pc8.disable_count < 0,
"pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
 
dev_priv->pc8.disable_count++;
if (dev_priv->pc8.disable_count != 1)
return;
 
// cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
if (!dev_priv->pc8.enabled)
return;
 
DRM_DEBUG_KMS("Disabling package C8+\n");
 
hsw_restore_lcpll(dev_priv);
hsw_pc8_restore_interrupts(dev);
lpt_init_pch_refclk(dev);
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
 
intel_prepare_ddi(dev);
i915_gem_init_swizzling(dev);
mutex_lock(&dev_priv->rps.hw_lock);
gen6_update_ring_freq(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
dev_priv->pc8.enabled = false;
}
 
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->pc8.lock);
__hsw_enable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
}
 
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->pc8.lock);
__hsw_disable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
}
 
static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
uint32_t val;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
if (crtc->base.enabled)
return false;
 
/* This case is still possible since we have the i915.disable_power_well
* parameter and also the KVMr or something else might be requesting the
* power well. */
val = I915_READ(HSW_PWR_WELL_DRIVER);
if (val != 0) {
DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
return false;
}
 
return true;
}
 
/* Since we're called from modeset_global_resources there's no way to
* symmetrically increase and decrease the refcount, so we use
* dev_priv->pc8.requirements_met to track whether we already have the refcount
* or not.
*/
static void hsw_update_package_c8(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool allow;
 
if (!i915_enable_pc8)
return;
 
mutex_lock(&dev_priv->pc8.lock);
 
allow = hsw_can_enable_package_c8(dev_priv);
 
if (allow == dev_priv->pc8.requirements_met)
goto done;
 
dev_priv->pc8.requirements_met = allow;
 
if (allow)
__hsw_enable_package_c8(dev_priv);
else
intel_crtc->config.cpu_transcoder = pipe;
__hsw_disable_package_c8(dev_priv);
 
/* We are not sure yet this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
INTEL_PCH_TYPE(dev));
done:
mutex_unlock(&dev_priv->pc8.lock);
}
 
WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
num_connectors, pipe_name(pipe));
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
hsw_enable_package_c8(dev_priv);
}
}
 
WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) &
(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
hsw_disable_package_c8(dev_priv);
}
}
 
WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
static void haswell_modeset_global_resources(struct drm_device *dev)
{
bool enable = false;
struct intel_crtc *crtc;
 
if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
return -EINVAL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
if (!crtc->base.enabled)
continue;
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
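/* Presumably only pipe A driving the eDP transcoder without the pch panel
 * fitter can run from the always-on power well; any other active
 * configuration forces the power well on. */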
if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
crtc->config.cpu_transcoder != TRANSCODER_EDP)
enable = true;
}
 
/* determine panel color depth */
dither = intel_crtc->config.dither;
intel_set_power_well(dev, enable);
 
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
hsw_update_package_c8(dev);
}
 
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane;
int ret;
 
if (!intel_ddi_pll_mode_set(crtc))
return -EINVAL;
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_crtc->lowfreq_avail = false;
 
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder)
ironlake_fdi_set_m_n(crtc);
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
}
 
haswell_set_pipeconf(crtc, adjusted_mode, dither);
haswell_set_pipeconf(crtc);
 
intel_set_pipe_csc(crtc);
 
5805,8 → 6272,6
 
intel_update_watermarks(dev);
 
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
 
return ret;
}
 
5815,23 → 6280,70
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
 
tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder));
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
if (tmp & TRANS_DDI_FUNC_ENABLE) {
enum pipe trans_edp_pipe;
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
WARN(1, "unknown pipe linked to edp transcoder\n");
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
trans_edp_pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
trans_edp_pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
trans_edp_pipe = PIPE_C;
break;
}
 
if (trans_edp_pipe == crtc->pipe)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
 
if (!intel_display_power_enabled(dev,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
 
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
if (!(tmp & PIPECONF_ENABLE))
return false;
 
/*
* aswell has only FDI/PCH transcoder A. It is which is connected to
* Haswell has only FDI/PCH transcoder A, which is connected to
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe));
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE)
I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
 
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
ironlake_get_fdi_m_n_config(crtc, pipe_config);
}
 
intel_get_pipe_timings(crtc, pipe_config);
 
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_enabled(dev, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
 
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
 
pipe_config->pixel_multiplier = 1;
 
return true;
}
 
5841,11 → 6353,8
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int ret;
5864,13 → 6373,8
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
mode->base.id, mode->name);
if (encoder->mode_set) {
encoder->mode_set(encoder);
} else {
encoder_funcs = encoder->base.helper_private;
encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
}
}
 
return 0;
}
5976,15 → 6480,15
 
/* Set ELD valid state */
tmp = I915_READ(aud_cntrl_st2);
DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
tmp = I915_READ(aud_cntrl_st2);
DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
 
/* Enable HDMI mode */
tmp = I915_READ(aud_config);
DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
/* clear N_programming_enable and N_value_index */
tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
I915_WRITE(aud_config, tmp);
6068,7 → 6572,7
eldv |= IBX_ELD_VALIDB << 4;
eldv |= IBX_ELD_VALIDB << 8;
} else {
DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
}
 
6136,17 → 6640,32
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int palreg = PALETTE(intel_crtc->pipe);
enum pipe pipe = intel_crtc->pipe;
int palreg = PALETTE(pipe);
int i;
bool reenable_ips = false;
 
/* The clocks have to be on to load the palette. */
if (!crtc->enabled || !intel_crtc->active)
return;
 
if (!HAS_PCH_SPLIT(dev_priv->dev))
assert_pll_enabled(dev_priv, pipe);
 
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
palreg = LGC_PALETTE(intel_crtc->pipe);
palreg = LGC_PALETTE(pipe);
 
/* Workaround: Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
if (intel_crtc->config.ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
}
 
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
(intel_crtc->lut_r[i] << 16) |
6153,6 → 6672,9
(intel_crtc->lut_g[i] << 8) |
intel_crtc->lut_b[i]);
}
 
if (reenable_ips)
hsw_enable_ips(intel_crtc);
}
 
#if 0
6229,8 → 6751,10
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
if (IS_HASWELL(dev))
if (IS_HASWELL(dev)) {
cntl |= CURSOR_PIPE_CSC_ENABLE;
cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
}
I915_WRITE(CURCNTR_IVB(pipe), cntl);
 
intel_crtc->cursor_visible = visible;
6367,7 → 6891,7
goto fail_unpin;
}
 
addr = obj->gtt_offset;
addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
6389,7 → 6913,7
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
i915_gem_object_unpin(intel_crtc->cursor_bo);
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
 
6400,11 → 6924,12
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
 
// intel_crtc_update_cursor(crtc, true);
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
return 0;
fail_unpin:
i915_gem_object_unpin(obj);
i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
6419,7 → 6944,8
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
 
// intel_crtc_update_cursor(crtc, true);
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
return 0;
}
6513,18 → 7039,7
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
// obj = i915_gem_alloc_object(dev,
// intel_framebuffer_size_for_mode(mode, bpp));
// if (obj == NULL)
return ERR_PTR(-ENOMEM);
 
// mode_cmd.width = mode->hdisplay;
// mode_cmd.height = mode->vdisplay;
// mode_cmd.depth = depth;
// mode_cmd.bpp = bpp;
// mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
 
// return intel_framebuffer_create(dev, &mode_cmd, obj);
return NULL;
}
 
static struct drm_framebuffer *
6695,11 → 7210,12
}
 
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int pipe = pipe_config->cpu_transcoder;
u32 dpll = I915_READ(DPLL(pipe));
u32 fp;
intel_clock_t clock;
6738,11 → 7254,14
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return 0;
pipe_config->adjusted_mode.clock = 0;
return;
}
 
/* XXX: Handle the 100MHz refclk */
intel_clock(dev, 96000, &clock);
if (IS_PINEVIEW(dev))
pineview_clock(96000, &clock);
else
i9xx_clock(96000, &clock);
} else {
bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
 
6754,9 → 7273,9
if ((dpll & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
/* XXX: might not be 66MHz */
intel_clock(dev, 66000, &clock);
i9xx_clock(66000, &clock);
} else
intel_clock(dev, 48000, &clock);
i9xx_clock(48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
6769,16 → 7288,58
else
clock.p2 = 2;
 
intel_clock(dev, 48000, &clock);
i9xx_clock(48000, &clock);
}
}
 
/* XXX: It would be nice to validate the clocks, but we can't reuse
* i830PllIsValid() because it relies on the xf86_config connector
* configuration being accurate, which it isn't necessarily.
pipe_config->adjusted_mode.clock = clock.dot;
}
 
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
int link_freq, repeat;
u64 clock;
u32 link_m, link_n;
 
repeat = pipe_config->pixel_multiplier;
 
/*
* The calculation for the data clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
* But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
*
* and the link clock is simpler:
* link_clock = (m * link_clock * repeat) / n
*/
 
return clock.dot;
/*
* We need to get the FDI or DP link clock here to derive
* the M/N dividers.
*
* For FDI, we read it from the BIOS or use a fixed 2.7GHz.
* For DP, it's either 1.62GHz or 2.7GHz.
* We do our calculations in 10*MHz since we don't need much precision.
*/
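/*
 * Illustrative numbers only (not from any spec): with a 2.7GHz DP link,
 * link_freq = 270000 in the driver's units. If the hardware M/N pair
 * encodes the ratio 11/20, the recovered pixel clock is
 * 270000 * 11 / 20 = 148500, i.e. a 148.5 MHz mode.
 */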
if (pipe_config->has_pch_encoder)
link_freq = intel_fdi_link_freq(dev) * 10000;
else
link_freq = pipe_config->port_clock;
 
link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
 
if (!link_m || !link_n)
return;
 
clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
do_div(clock, link_n);
 
pipe_config->adjusted_mode.clock = clock;
}
 
/** Returns the currently programmed mode of the given pipe. */
6789,6 → 7350,7
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *mode;
struct intel_crtc_config pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
6798,7 → 7360,18
if (!mode)
return NULL;
 
mode->clock = intel_crtc_clock_get(dev, crtc);
/*
* Construct a pipe_config sufficient for getting the clock info
* back out of crtc_clock_get.
*
* Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
* to use a real value here instead.
*/
pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
pipe_config.pixel_multiplier = 1;
i9xx_crtc_clock_get(intel_crtc, &pipe_config);
 
mode->clock = pipe_config.adjusted_mode.clock;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
6882,13 → 7455,19
 
void intel_mark_busy(struct drm_device *dev)
{
i915_update_gfx_val(dev->dev_private);
struct drm_i915_private *dev_priv = dev->dev_private;
 
hsw_package_c8_gpu_busy(dev_priv);
i915_update_gfx_val(dev_priv);
}
 
void intel_mark_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
hsw_package_c8_gpu_idle(dev_priv);
 
if (!i915_powersave)
return;
 
6900,7 → 7479,8
}
}
 
void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
6912,8 → 7492,12
if (!crtc->fb)
continue;
 
if (to_intel_framebuffer(crtc->fb)->obj == obj)
if (to_intel_framebuffer(crtc->fb)->obj != obj)
continue;
 
intel_increase_pllclock(crtc);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
 
7047,7 → 7631,8
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7075,7 → 7660,7
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
 
intel_mark_page_flip_active(intel_crtc);
7091,7 → 7676,8
static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7116,7 → 7702,7
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
 
intel_mark_page_flip_active(intel_crtc);
7132,7 → 7718,8
static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7156,7 → 7743,7
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
(obj->gtt_offset + intel_crtc->dspaddr_offset) |
(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
obj->tiling_mode);
 
/* XXX Enabling the panel-fitter across page-flip is so far
7180,7 → 7767,8
static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7199,7 → 7787,7
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
7221,23 → 7809,22
return ret;
}
 
/*
* On gen7 we currently use the blit ring because (in early silicon at least)
* the render ring doesn't give us interrpts for page flip completion, which
* means clients will hang after the first flip is queued. Fortunately the
* blit ring generates interrupts properly, so use it instead.
*/
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
struct intel_ring_buffer *ring;
uint32_t plane_bit = 0;
int ret;
int len, ret;
 
ring = obj->ring;
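/* Prefer the ring the object was last written on, but the flip is only
 * emitted on RCS or BCS here; fall back to the blitter ring when that is
 * not the render ring (and always on VLV). */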
if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
7258,13 → 7845,37
goto err_unpin;
}
 
ret = intel_ring_begin(ring, 4);
len = 4;
if (ring->id == RCS)
len += 6;
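/* The six extra dwords cover the DERRMR handling emitted below for RCS:
 * a 3-dword MI_LOAD_REGISTER_IMM plus a 3-dword MI_STORE_REGISTER_MEM. */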
 
ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
 
/* Unmask the flip-done completion message. Note that the bspec says that
* we should do this for both the BCS and RCS, and that we must not unmask
* more than one flip event at any time (or ensure that one flip message
* can be sent by waiting for flip-done prior to queueing new flips).
* Experimentation says that BCS works despite DERRMR masking all
* flip-done completion events and that unmasking all planes at once
* for the RCS also doesn't appear to drop events. Setting the DERRMR
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
if (ring->id == RCS) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
}
 
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
 
intel_mark_page_flip_active(intel_crtc);
7280,7 → 7891,8
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj,
uint32_t flags)
{
return -ENODEV;
}
7287,7 → 7899,8
 
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
7357,12 → 7970,12
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
if (ret)
goto cleanup_pending;
 
intel_disable_fbc(dev);
intel_mark_fb_busy(obj);
intel_mark_fb_busy(obj, NULL);
mutex_unlock(&dev->struct_mutex);
 
trace_i915_flip_request(intel_crtc->plane, obj);
7387,7 → 8000,6
 
return ret;
}
 
#endif
 
static struct drm_crtc_helper_funcs intel_helper_funcs = {
7395,28 → 8007,6
.load_lut = intel_crtc_load_lut,
};
 
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
{
struct intel_encoder *other_encoder;
struct drm_crtc *crtc = &encoder->new_crtc->base;
 
if (WARN_ON(!crtc))
return false;
 
list_for_each_entry(other_encoder,
&crtc->dev->mode_config.encoder_list,
base.head) {
 
if (&other_encoder->new_crtc->base != crtc ||
encoder == other_encoder)
continue;
else
return true;
}
 
return false;
}
 
static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
struct drm_crtc *crtc)
{
7484,13 → 8074,39
}
}
 
static void
connected_sink_compute_bpp(struct intel_connector * connector,
struct intel_crtc_config *pipe_config)
{
int bpp = pipe_config->pipe_bpp;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
connector->base.base.id,
drm_get_connector_name(&connector->base));
 
/* Don't use an invalid EDID bpc value */
if (connector->base.display_info.bpc &&
connector->base.display_info.bpc * 3 < bpp) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
bpp, connector->base.display_info.bpc*3);
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
 
/* Clamp bpp to 8 on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0 && bpp > 24) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
bpp);
pipe_config->pipe_bpp = 24;
}
}
 
static int
pipe_config_set_bpp(struct drm_crtc *crtc,
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct drm_framebuffer *fb,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_device *dev = crtc->base.dev;
struct intel_connector *connector;
int bpp;
 
switch (fb->pixel_format) {
7533,22 → 8149,67
 
/* Clamp display bpp to EDID value */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
if (connector->encoder && connector->encoder->crtc != crtc)
base.head) {
if (!connector->new_encoder ||
connector->new_encoder->new_crtc != crtc)
continue;
 
/* Don't use an invalid EDID bpc value */
if (connector->display_info.bpc &&
connector->display_info.bpc * 3 < bpp) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
bpp, connector->display_info.bpc*3);
pipe_config->pipe_bpp = connector->display_info.bpc*3;
connected_sink_compute_bpp(connector, pipe_config);
}
}
 
return bpp;
}
 
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
const char *context)
{
DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
context, pipe_name(crtc->pipe));
 
DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
pipe_config->pipe_bpp, pipe_config->dither);
DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
pipe_config->has_pch_encoder,
pipe_config->fdi_lanes,
pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
pipe_config->gmch_pfit.lvds_border_bits);
DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
pipe_config->pch_pfit.pos,
pipe_config->pch_pfit.size,
pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
}
 
static bool check_encoder_cloning(struct drm_crtc *crtc)
{
int num_encoders = 0;
bool uncloneable_encoders = false;
struct intel_encoder *encoder;
 
list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
base.head) {
if (&encoder->new_crtc->base != crtc)
continue;
 
num_encoders++;
if (!encoder->cloneable)
uncloneable_encoders = true;
}
 
return !(num_encoders > 1 && uncloneable_encoders);
}
 
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
7555,11 → 8216,16
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc_config *pipe_config;
int plane_bpp;
int plane_bpp, ret = -EINVAL;
bool retry = true;
 
if (!check_encoder_cloning(crtc)) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
return ERR_PTR(-EINVAL);
}
 
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
if (!pipe_config)
return ERR_PTR(-ENOMEM);
7566,11 → 8232,40
 
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config);
/*
* Sanitize sync polarity flags based on requested ones. If neither
* positive nor negative polarity is requested, treat this as meaning
* negative polarity.
*/
if (!(pipe_config->adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
 
if (!(pipe_config->adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
 
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
* source plane bpp so that dithering can be selected on mismatches
* after encoders and crtc also have had their say. */
plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
fb, pipe_config);
if (plane_bpp < 0)
goto fail;
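/* The encoders and the crtc below each get a chance to adjust the config.
* If the crtc reports RETRY (typically because it had to lower pipe_bpp to
* fit the available bandwidth), we jump back to encoder_retry exactly once
* and recompute with the reduced value. */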
 
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
pipe_config->port_clock = 0;
pipe_config->pixel_multiplier = 1;
 
/* Fill in default crtc timings, allow encoders to overwrite them. */
drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
 
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
7581,30 → 8276,34
if (&encoder->new_crtc->base != crtc)
continue;
 
if (encoder->compute_config) {
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
}
 
continue;
}
 
encoder_funcs = encoder->base.helper_private;
if (!(encoder_funcs->mode_fixup(&encoder->base,
&pipe_config->requested_mode,
&pipe_config->adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
pipe_config->port_clock = pipe_config->adjusted_mode.clock;
 
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
if (ret < 0) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto fail;
}
}
 
if (!(intel_crtc_compute_config(crtc, pipe_config))) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
if (ret == RETRY) {
if (WARN(!retry, "loop in pipe configuration computation\n")) {
ret = -EINVAL;
goto fail;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
retry = false;
goto encoder_retry;
}
 
pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
7612,7 → 8311,7
return pipe_config;
fail:
kfree(pipe_config);
return ERR_PTR(-EINVAL);
return ERR_PTR(ret);
}
 
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
7708,6 → 8407,9
*/
*modeset_pipes &= 1 << intel_crtc->pipe;
*prepare_pipes &= 1 << intel_crtc->pipe;
 
DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
*modeset_pipes, *prepare_pipes, *disable_pipes);
}
 
static bool intel_crtc_in_use(struct drm_crtc *crtc)
7770,35 → 8472,152
 
}
 
static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
struct intel_crtc_config *new)
{
int clock1, clock2, diff;
 
clock1 = cur->adjusted_mode.clock;
clock2 = new->adjusted_mode.clock;
 
if (clock1 == clock2)
return true;
 
if (!clock1 || !clock2)
return false;
 
diff = abs(clock1 - clock2);
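/* The check below treats the clocks as equal when their difference is
* under 5% of (clock1 + clock2), i.e. roughly a 10% mismatch relative to
* the average clock. Example: 100000 vs 103000 kHz gives
* (3000 + 203000) * 100 / 203000 = 101, which is below 105. */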
 
if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
return true;
 
return false;
}
 
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
base.head) \
if (mask & (1 <<(intel_crtc)->pipe)) \
if (mask & (1 <<(intel_crtc)->pipe))
 
static bool
intel_pipe_config_compare(struct intel_crtc_config *current_config,
intel_pipe_config_compare(struct drm_device *dev,
struct intel_crtc_config *current_config,
struct intel_crtc_config *pipe_config)
{
if (current_config->has_pch_encoder != pipe_config->has_pch_encoder) {
DRM_ERROR("mismatch in has_pch_encoder "
"(expected %i, found %i)\n",
current_config->has_pch_encoder,
pipe_config->has_pch_encoder);
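/* The helper macros below each compare one field of the software-tracked
* config against the state read back from the hardware and fail on the
* first mismatch: _X logs the values in hex, _I in decimal, _FLAGS checks
* only the bits covered by the given mask, and PIPE_CONF_QUIRK tests
* whether either config carries the named quirk. */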
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
DRM_ERROR("mismatch in " #name " " \
"(expected 0x%08x, found 0x%08x)\n", \
current_config->name, \
pipe_config->name); \
return false; \
}
 
#define PIPE_CONF_CHECK_I(name) \
if (current_config->name != pipe_config->name) { \
DRM_ERROR("mismatch in " #name " " \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
return false; \
}
 
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
DRM_ERROR("mismatch in " #name "(" #mask ") " \
"(expected %i, found %i)\n", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
return false; \
}
 
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
 
PIPE_CONF_CHECK_I(cpu_transcoder);
 
PIPE_CONF_CHECK_I(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
PIPE_CONF_CHECK_I(fdi_m_n.link_m);
PIPE_CONF_CHECK_I(fdi_m_n.link_n);
PIPE_CONF_CHECK_I(fdi_m_n.tu);
 
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
 
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
 
PIPE_CONF_CHECK_I(pixel_multiplier);
 
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
 
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_PHSYNC);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_NHSYNC);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_PVSYNC);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_NVSYNC);
}
 
PIPE_CONF_CHECK_I(requested_mode.hdisplay);
PIPE_CONF_CHECK_I(requested_mode.vdisplay);
 
PIPE_CONF_CHECK_I(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
if (INTEL_INFO(dev)->gen < 4)
PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_I(pch_pfit.pos);
PIPE_CONF_CHECK_I(pch_pfit.size);
}
 
PIPE_CONF_CHECK_I(ips_enabled);
 
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_QUIRK
 
if (!IS_HASWELL(dev)) {
if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
current_config->adjusted_mode.clock,
pipe_config->adjusted_mode.clock);
return false;
}
}
 
return true;
}
 
void
intel_modeset_check_state(struct drm_device *dev)
static void
check_connector_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct intel_crtc_config pipe_config;
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
7809,7 → 8628,14
WARN(&connector->new_encoder->base != connector->base.encoder,
"connector's staged encoder doesn't match current encoder\n");
}
}
 
static void
check_encoder_state(struct drm_device *dev)
{
struct intel_encoder *encoder;
struct intel_connector *connector;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
bool enabled = false;
7860,12 → 8686,23
tracked_pipe, pipe);
 
}
}
 
static void
check_crtc_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_crtc_config pipe_config;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
bool enabled = false;
bool active = false;
 
memset(&pipe_config, 0, sizeof(pipe_config));
 
DRM_DEBUG_KMS("[CRTC:%d]\n",
crtc->base.base.id);
 
7880,6 → 8717,7
if (encoder->connectors_active)
active = true;
}
 
WARN(active != crtc->active,
"crtc's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, crtc->active);
7887,7 → 8725,6
"crtc's computed enabled state doesn't match tracked enabled state "
"(expected %i, found %i)\n", enabled, crtc->base.enabled);
 
memset(&pipe_config, 0, sizeof(pipe_config));
active = dev_priv->display.get_pipe_config(crtc,
&pipe_config);
 
7895,16 → 8732,93
if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
active = crtc->active;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
if (encoder->get_config &&
encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
 
if (dev_priv->display.get_clock)
dev_priv->display.get_clock(crtc, &pipe_config);
 
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
 
WARN(active &&
!intel_pipe_config_compare(&crtc->config, &pipe_config),
"pipe state doesn't match!\n");
if (active &&
!intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(crtc, &pipe_config,
"[hw state]");
intel_dump_pipe_config(crtc, &crtc->config,
"[sw state]");
}
}
}
 
static void
check_shared_dpll_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_dpll_hw_state dpll_hw_state;
int i;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
int enabled_crtcs = 0, active_crtcs = 0;
bool active;
 
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
 
DRM_DEBUG_KMS("%s\n", pll->name);
 
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
 
WARN(pll->active > pll->refcount,
"more active pll users than references: %i vs %i\n",
pll->active, pll->refcount);
WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
WARN(pll->on && !pll->active,
"pll in on but not on in use in sw tracking\n");
WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
enabled_crtcs++;
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
active_crtcs++;
}
WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
WARN(pll->refcount != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
pll->refcount, enabled_crtcs);
 
WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
}
 
void
intel_modeset_check_state(struct drm_device *dev)
{
check_connector_state(dev);
check_encoder_state(dev);
check_crtc_state(dev);
check_shared_dpll_state(dev);
}
 
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
7941,11 → 8855,10
 
goto out;
}
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
"[modeset]");
}
 
DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
modeset_pipes, prepare_pipes, disable_pipes);
 
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
 
7958,12 → 8871,10
* to set it here already despite that we pass it down the callchain.
*/
if (modeset_pipes) {
enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder;
crtc->mode = *mode;
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
to_intel_crtc(crtc)->config = *pipe_config;
to_intel_crtc(crtc)->config.cpu_transcoder = tmp;
}
 
/* Only after disabling all output pipelines that will be changed can we
8011,7 → 8922,7
return ret;
}
 
int intel_set_mode(struct drm_crtc *crtc,
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
8099,15 → 9010,20
}
 
static bool
is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
int num_connectors)
is_crtc_connector_off(struct drm_mode_set *set)
{
int i;
 
for (i = 0; i < num_connectors; i++)
if (connectors[i].encoder &&
connectors[i].encoder->crtc == crtc &&
connectors[i].dpms != DRM_MODE_DPMS_ON)
if (set->num_connectors == 0)
return false;
 
if (WARN_ON(set->connectors == NULL))
return false;
 
for (i = 0; i < set->num_connectors; i++)
if (set->connectors[i]->encoder &&
set->connectors[i]->encoder->crtc == set->crtc &&
set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
return true;
 
return false;
8120,15 → 9036,21
 
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->connectors != NULL &&
is_crtc_connector_off(set->crtc, *set->connectors,
set->num_connectors)) {
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
 
if (intel_crtc->active && i915_fastboot) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
DRM_DEBUG_KMS("inactive crtc, full mode set\n");
config->mode_changed = true;
}
} else if (set->fb == NULL) {
config->mode_changed = true;
} else if (set->fb->pixel_format !=
8148,6 → 9070,9
drm_mode_debug_printmodeline(set->mode);
config->mode_changed = true;
}
 
DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
set->crtc->base.id, config->mode_changed, config->fb_changed);
}
 
static int
8158,7 → 9083,7
struct drm_crtc *new_crtc;
struct intel_connector *connector;
struct intel_encoder *encoder;
int count, ro;
int ro;
 
/* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
8165,7 → 9090,6
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
 
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
/* Otherwise traverse passed in connector list and get encoders
8199,7 → 9123,6
/* connector->new_encoder is now updated for all connectors. */
 
/* Update crtc of enabled connectors. */
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (!connector->new_encoder)
8302,12 → 9225,6
goto fail;
 
if (config->mode_changed) {
if (set->mode) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
}
 
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
} else if (config->fb_changed) {
8318,7 → 9235,7
}
 
if (ret) {
DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
fail:
intel_set_config_restore_state(dev, config);
8350,23 → 9267,103
intel_ddi_pll_init(dev);
}
 
static void intel_pch_pll_init(struct drm_device *dev)
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
uint32_t val;
 
if (dev_priv->num_pch_pll == 0) {
DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
return;
val = I915_READ(PCH_DPLL(pll->id));
hw_state->dpll = val;
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
 
return val & DPLL_VCO_ENABLE;
}
 
for (i = 0; i < dev_priv->num_pch_pll; i++) {
dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
}
 
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
 
I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pll->id));
udelay(150);
 
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
 
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
 
/* Make sure no transcoder is still depending on us. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
if (intel_crtc_to_shared_dpll(crtc) == pll)
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
 
I915_WRITE(PCH_DPLL(pll->id), 0);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
 
static char *ibx_pch_dpll_names[] = {
"PCH DPLL A",
"PCH DPLL B",
};
 
static void ibx_pch_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
dev_priv->num_shared_dpll = 2;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
dev_priv->shared_dplls[i].get_hw_state =
ibx_pch_dpll_get_hw_state;
}
}
 
static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ibx_pch_dpll_init(dev);
else
dev_priv->num_shared_dpll = 0;
 
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
DRM_DEBUG_KMS("%i shared PLLs initialized\n",
dev_priv->num_shared_dpll);
}
 
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
8389,7 → 9386,6
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
intel_crtc->config.cpu_transcoder = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
8472,13 → 9468,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
bool has_lvds;
 
has_lvds = intel_lvds_init(dev);
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
}
intel_lvds_init(dev);
 
if (!IS_ULT(dev))
intel_crt_init(dev);
8531,8 → 9522,13
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
PORT_C);
}
 
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
8551,11 → 9547,9
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
 
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_B\n");
if (!found && SUPPORTS_INTEGRATED_DP(dev))
intel_dp_init(dev, DP_B, PORT_B);
}
}
 
/* Before G4X, SDVOC doesn't have its own detect register */
 
8570,17 → 9564,13
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
if (SUPPORTS_INTEGRATED_DP(dev))
intel_dp_init(dev, DP_C, PORT_C);
}
}
 
if (SUPPORTS_INTEGRATED_DP(dev) &&
(I915_READ(DP_D) & DP_DETECTED)) {
DRM_DEBUG_KMS("probing DP_D\n");
(I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D, PORT_D);
}
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
 
8610,6 → 9600,7
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int pitch_limit;
int ret;
 
if (obj->tiling_mode == I915_TILING_Y) {
8623,10 → 9614,26
return -EINVAL;
}
 
/* FIXME <= Gen4 stride limits are a bit unclear */
if (mode_cmd->pitches[0] > 32768) {
DRM_DEBUG("pitch (%d) must be less than 32768\n",
mode_cmd->pitches[0]);
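/* Stride caps enforced below (note VLV takes the gen4 path): 8 KiB on
* gen2 and on tiled gen3, 16 KiB on linear gen3 and on tiled gen4, and
* 32 KiB on linear gen4 and on all other gen5+ parts. */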
if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
pitch_limit = 32*1024;
} else if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode)
pitch_limit = 16*1024;
else
pitch_limit = 32*1024;
} else if (INTEL_INFO(dev)->gen >= 3) {
if (obj->tiling_mode)
pitch_limit = 8*1024;
else
pitch_limit = 16*1024;
} else
/* XXX DSPC is limited to 4k tiled */
pitch_limit = 8*1024;
 
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
obj->tiling_mode ? "tiled" : "linear",
mode_cmd->pitches[0], pitch_limit);
return -EINVAL;
}
 
8647,7 → 9654,8
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
if (INTEL_INFO(dev)->gen > 3) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
8658,7 → 9666,8
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
8667,12 → 9676,14
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
if (INTEL_INFO(dev)->gen < 5) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
default:
DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
 
8703,6 → 9714,15
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
dev_priv->display.find_dpll = g4x_find_best_dpll;
else if (IS_VALLEYVIEW(dev))
dev_priv->display.find_dpll = vlv_find_best_dpll;
else if (IS_PINEVIEW(dev))
dev_priv->display.find_dpll = pnv_find_best_dpll;
else
dev_priv->display.find_dpll = i9xx_find_best_dpll;
 
if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
8712,13 → 9732,23
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_clock = ironlake_crtc_clock_get;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
8736,9 → 9766,12
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
else if (IS_I945GM(dev) || IS_845G(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_PINEVIEW(dev))
dev_priv->display.get_display_clock_speed =
pnv_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
8817,6 → 9850,17
DRM_INFO("applying inverted panel brightness quirk\n");
}
 
/*
* Some machines (Dell XPS13) suffer broken backlight controls if
* BLM_PCH_PWM_ENABLE is set.
*/
static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
}
 
struct intel_quirk {
int device;
int subsystem_vendor;
8886,6 → 9930,11
 
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
 
/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};
 
static void intel_init_quirks(struct drm_device *dev)
8980,18 → 10029,18
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
for_each_pipe(i) {
intel_crtc_init(dev, i);
for (j = 0; j < dev_priv->num_plane; j++) {
ret = intel_plane_init(dev, i, j);
if (ret)
DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n",
i, j, ret);
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
pipe_name(i), sprite_name(i, j), ret);
}
}
 
intel_cpu_pll_init(dev);
intel_pch_pll_init(dev);
intel_shared_dpll_init(dev);
 
/* Just disable it once at startup */
i915_disable_vga(dev);
9186,6 → 10235,17
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
 
/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
* paranoid "someone might have enabled VGA while we were not looking"
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if (HAS_POWER_WELL(dev) &&
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
return;
 
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
9192,57 → 10252,18
}
}
 
/* Scan out the current hw modeset state, sanitize it and map it into the drm
* and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore)
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 tmp;
struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
int i;
 
if (HAS_DDI(dev)) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
if (tmp & TRANS_DDI_FUNC_ENABLE) {
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
pipe = PIPE_C;
break;
default:
/* A bogus value has been programmed, disable
* the transcoder */
WARN(1, "Bogus eDP source %08x\n", tmp);
intel_ddi_disable_transcoder_func(dev_priv,
TRANSCODER_EDP);
goto setup_pipes;
}
 
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
crtc->config.cpu_transcoder = TRANSCODER_EDP;
 
DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
pipe_name(pipe));
}
}
 
setup_pipes:
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
enum transcoder tmp = crtc->config.cpu_transcoder;
memset(&crtc->config, 0, sizeof(crtc->config));
crtc->config.cpu_transcoder = tmp;
 
crtc->active = dev_priv->display.get_pipe_config(crtc,
&crtc->config);
9254,16 → 10275,35
crtc->active ? "enabled" : "disabled");
}
 
/* FIXME: Smash this into the new shared dpll infrastructure. */
if (HAS_DDI(dev))
intel_ddi_setup_hw_pll_state(dev);
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
pll->active = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
pll->active++;
}
pll->refcount = pll->active;
 
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on);
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
pipe = 0;
 
if (encoder->get_hw_state(encoder, &pipe)) {
encoder->base.crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
if (encoder->get_config)
encoder->get_config(encoder, &crtc->config);
} else {
encoder->base.crtc = NULL;
}
9276,6 → 10316,15
pipe);
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (!crtc->active)
continue;
if (dev_priv->display.get_clock)
dev_priv->display.get_clock(crtc,
&crtc->config);
}
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->get_hw_state(connector)) {
9291,7 → 10340,38
drm_get_connector_name(&connector->base),
connector->base.encoder ? "enabled" : "disabled");
}
}
 
/* Scan out the current hw modeset state, sanitize it and map it into the drm
* and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
int i;
 
intel_modeset_readout_hw_state(dev);
 
/*
* Now that we have the config, copy it to each CRTC struct.
* Note that this could go away if we move to using crtc_config
* checking everywhere.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (crtc->active && i915_fastboot) {
intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
 
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
crtc->base.base.id);
drm_mode_debug_printmodeline(&crtc->base.mode);
}
}
 
/* HW state is read out, now we need to sanitize this mess. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
9301,8 → 10381,21
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_sanitize_crtc(crtc);
intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
}
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
if (!pll->on || pll->active)
continue;
 
DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
 
pll->disable(dev_priv, pll);
pll->on = false;
}
 
if (force_restore) {
/*
* We need to use raw interfaces for restoring state to avoid
9342,20 → 10435,29
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
 
/*
* Disable interrupts and stop polling as the first thing to avoid creating havoc.
* Too much stuff here (turning off rps, connectors, ...) would
* experience fancy races otherwise.
*/
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
/*
* Due to the hpd irq storm handling, the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
// drm_kms_helper_poll_fini(dev);
 
mutex_lock(&dev->struct_mutex);
 
// intel_unregister_dsm_handler();
 
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
continue;
 
intel_crtc = to_intel_crtc(crtc);
intel_increase_pllclock(crtc);
}
 
9365,14 → 10467,10
 
ironlake_teardown_rc6(dev);
 
if (IS_VALLEYVIEW(dev))
vlv_init_dpio(dev);
 
mutex_unlock(&dev->struct_mutex);
 
/* Disable the irq before mode object teardown, for the irq might
* enqueue unpin/hotplug work. */
// drm_irq_uninstall(dev);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
// cancel_work_sync(&dev_priv->hotplug_work);
// cancel_work_sync(&dev_priv->rps.work);
 
9420,6 → 10518,11
#include <linux/seq_file.h>
 
struct intel_display_error_state {
 
u32 power_well_driver;
 
int num_transcoders;
 
struct intel_cursor_error_state {
u32 control;
u32 position;
9428,15 → 10531,7
} cursor[I915_MAX_PIPES];
 
struct intel_pipe_error_state {
u32 conf;
u32 source;
 
u32 htotal;
u32 hblank;
u32 hsync;
u32 vtotal;
u32 vblank;
u32 vsync;
} pipe[I915_MAX_PIPES];
 
struct intel_plane_error_state {
9448,6 → 10543,19
u32 surface;
u32 tile_offset;
} plane[I915_MAX_PIPES];
 
struct intel_transcoder_error_state {
enum transcoder cpu_transcoder;
 
u32 conf;
 
u32 htotal;
u32 hblank;
u32 hsync;
u32 vtotal;
u32 vblank;
u32 vsync;
} transcoder[4];
};
 
struct intel_display_error_state *
9455,16 → 10563,25
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
enum transcoder cpu_transcoder;
int transcoders[] = {
TRANSCODER_A,
TRANSCODER_B,
TRANSCODER_C,
TRANSCODER_EDP,
};
int i;
 
if (INTEL_INFO(dev)->num_pipes == 0)
return NULL;
 
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
 
if (HAS_POWER_WELL(dev))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
for_each_pipe(i) {
cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
 
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
9488,56 → 10605,86
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
 
error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
 
error->num_transcoders = INTEL_INFO(dev)->num_pipes;
if (HAS_DDI(dev_priv->dev))
error->num_transcoders++; /* Account for eDP. */
 
for (i = 0; i < error->num_transcoders; i++) {
enum transcoder cpu_transcoder = transcoders[i];
 
error->transcoder[i].cpu_transcoder = cpu_transcoder;
 
error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
 
/* In the code above we read the registers without checking if the power
* well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
* prevent the next I915_WRITE from detecting it and printing an error
* message. */
intel_uncore_clear_errors(dev);
 
return error;
}
 
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
 
void
intel_display_print_error_state(struct seq_file *m,
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
int i;
 
seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (!error)
return;
 
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (HAS_POWER_WELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
seq_printf(m, "Pipe [%d]:\n", i);
seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
err_printf(m, "Pipe [%d]:\n", i);
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
 
seq_printf(m, "Plane [%d]:\n", i);
seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
if (INTEL_INFO(dev)->gen <= 3) {
seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
seq_printf(m, " POS: %08x\n", error->plane[i].pos);
err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
}
if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
if (INTEL_INFO(dev)->gen >= 4) {
seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
err_printf(m, " SURF: %08x\n", error->plane[i].surface);
err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
 
seq_printf(m, "Cursor [%d]:\n", i);
seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
seq_printf(m, " POS: %08x\n", error->cursor[i].position);
seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
err_printf(m, "Cursor [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
err_printf(m, " POS: %08x\n", error->cursor[i].position);
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
 
for (i = 0; i < error->num_transcoders; i++) {
err_printf(m, " CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
#endif
/drivers/video/drm/i915/intel_dp.c
52,30 → 52,6
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
 
/**
* is_pch_edp - is the port on the PCH and attached to an eDP panel?
* @intel_dp: DP struct
*
* Returns true if the given DP struct corresponds to a PCH DP port attached
* to an eDP panel, false otherwise. Helpful for determining whether we
* may need FDI resources for a given DP output or not.
*/
static bool is_pch_edp(struct intel_dp *intel_dp)
{
return intel_dp->is_pch_edp;
}
 
/**
* is_cpu_edp - is the port on the CPU and attached to an eDP panel?
* @intel_dp: DP struct
*
* Returns true if the given DP struct corresponds to a CPU eDP port.
*/
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
 
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
88,25 → 64,6
return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
 
/**
* intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
* @encoder: DRM encoder
*
* Return true if @encoder corresponds to a PCH attached eDP panel. Needed
* by intel_display.c.
*/
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
 
if (!encoder)
return false;
 
intel_dp = enc_to_intel_dp(encoder);
 
return is_pch_edp(intel_dp);
}
 
static void intel_dp_link_down(struct intel_dp *intel_dp);
 
static int
118,7 → 75,12
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
break;
case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
max_link_bw = DP_LINK_BW_2_7;
break;
default:
WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
max_link_bw);
max_link_bw = DP_LINK_BW_1_62;
break;
}
303,7 → 265,7
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
if (has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies(10));
msecs_to_jiffies_timeout(10));
else
done = wait_for_atomic(C, 10) == 0;
if (!done)
314,6 → 276,45
return status;
}
 
static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* The clock divider is derived from the hrawclk, and the resulting AUX
* clock should run at about 2MHz. So take the hrawclk value and divide
* by 2 and use that.
*
* Note that PCH attached eDP panels should use a 125MHz input
* clock divider.
*/
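/* For example, a 125MHz PCH rawclk gives DIV_ROUND_UP(125, 2) = 63, so
* the AUX 2x bit clock ends up at roughly 125MHz / 63 ~= 2MHz, which is
* also the value the non-ULT HSW workaround below hard-codes for index 0. */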
if (IS_VALLEYVIEW(dev)) {
return index ? 0 : 100;
} else if (intel_dig_port->port == PORT_A) {
if (index)
return 0;
if (HAS_DDI(dev))
return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
else if (IS_GEN6(dev) || IS_GEN7(dev))
return 200; /* SNB & IVB eDP input clock at 400Mhz */
else
return 225; /* eDP input clock at 450Mhz */
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
case 1: return 72;
default: return 0;
}
} else if (HAS_PCH_SPLIT(dev)) {
return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
} else {
return index ? 0 :intel_hrawclk(dev) / 2;
}
}
 
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
324,10 → 325,10
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t ch_data = ch_ctl + 4;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
int try, precharge, clock = 0;
bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
 
/* dp aux is extremely sensitive to irq latency, hence request the
337,30 → 338,6
// pm_qos_update_request(&dev_priv->pm_qos, 0);
 
intel_dp_check_edp(intel_dp);
/* The clock divider is derived from the hrawclk, and the resulting AUX
* clock should run at about 2MHz. So take the hrawclk value and divide
* by 2 and use that.
*
* Note that PCH attached eDP panels should use a 125MHz input
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
if (HAS_DDI(dev))
aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
else if (IS_VALLEYVIEW(dev))
aux_clock_divider = 100;
else if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
aux_clock_divider = 74;
} else if (HAS_PCH_SPLIT(dev)) {
aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
} else {
aux_clock_divider = intel_hrawclk(dev) / 2;
}
 
if (IS_GEN6(dev))
precharge = 3;
367,6 → 344,8
else
precharge = 5;
 
intel_aux_display_runtime_get(dev_priv);
 
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ_NOTRACE(ch_ctl);
382,6 → 361,7
goto out;
}
 
while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
416,6 → 396,9
if (status & DP_AUX_CH_CTL_DONE)
break;
}
if (status & DP_AUX_CH_CTL_DONE)
break;
}
 
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
453,6 → 436,7
ret = recv_bytes;
out:
// pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_aux_display_runtime_put(dev_priv);
 
return ret;
}
604,7 → 588,7
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
udelay(100);
udelay(500);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
660,6 → 644,49
return ret;
}
 
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
{
struct drm_device *dev = encoder->base.dev;
 
if (IS_G4X(dev)) {
if (link_bw == DP_LINK_BW_1_62) {
pipe_config->dpll.p1 = 2;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.n = 2;
pipe_config->dpll.m1 = 23;
pipe_config->dpll.m2 = 8;
} else {
pipe_config->dpll.p1 = 1;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.n = 1;
pipe_config->dpll.m1 = 14;
pipe_config->dpll.m2 = 2;
}
pipe_config->clock_set = true;
} else if (IS_HASWELL(dev)) {
/* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
if (link_bw == DP_LINK_BW_1_62) {
pipe_config->dpll.n = 1;
pipe_config->dpll.p1 = 2;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.m1 = 12;
pipe_config->dpll.m2 = 9;
} else {
pipe_config->dpll.n = 2;
pipe_config->dpll.p1 = 1;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.m1 = 14;
pipe_config->dpll.m2 = 8;
}
pipe_config->clock_set = true;
} else if (IS_VALLEYVIEW(dev)) {
/* FIXME: Need to figure out optimized DP clocks for vlv. */
}
}
 
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
667,8 → 694,9
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *intel_crtc = encoder->new_crtc;
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
675,9 → 703,9
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
int target_clock, link_avail, link_clock;
int link_avail, link_clock;
 
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
 
pipe_config->has_dp_encoder = true;
685,12 → 713,13
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
intel_pch_panel_fitting(dev,
intel_connector->panel.fitting_mode,
mode, adjusted_mode);
if (!HAS_PCH_SPLIT(dev))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
}
/* We need to take the panel's fixed mode into account. */
target_clock = adjusted_mode->clock;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
701,12 → 730,15
 
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
if (is_edp(intel_dp) && dev_priv->edp.bpp)
bpp = min_t(int, bpp, dev_priv->edp.bpp);
bpp = pipe_config->pipe_bpp;
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
dev_priv->vbt.edp_bpp);
bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
}
 
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(target_clock, bpp);
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
 
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
741,20 → 773,21
 
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
pipe_config->pipe_bpp = bpp;
pipe_config->pixel_target_clock = target_clock;
pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
 
DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock, bpp);
pipe_config->port_clock, bpp);
DRM_DEBUG_KMS("DP link bw required %i available %i\n",
mode_rate, link_avail);
 
intel_link_compute_m_n(bpp, lane_count,
target_clock, adjusted_mode->clock,
adjusted_mode->clock, pipe_config->port_clock,
&pipe_config->dp_m_n);
 
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
 
return true;
}
 
773,24 → 806,28
}
}
 
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = crtc->dev;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
 
if (clock < 200000) {
if (crtc->config.port_clock == 162000) {
/* For a long time we've carried around an ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
dpa_ctl |= DP_PLL_FREQ_160MHZ;
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
} else {
dpa_ctl |= DP_PLL_FREQ_270MHZ;
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
 
I915_WRITE(DP_A, dpa_ctl);
799,15 → 836,14
udelay(500);
}
 
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_dp_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 
/*
* There are four kinds of DP registers:
833,23 → 869,13
 
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
 
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DP_PORT_WIDTH_1;
break;
case 2:
intel_dp->DP |= DP_PORT_WIDTH_2;
break;
case 4:
intel_dp->DP |= DP_PORT_WIDTH_4;
break;
}
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
 
intel_dp_init_link_config(intel_dp);
856,7 → 882,7
 
/* Split out the IBX/CPU vs CPT settings */
 
if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
866,14 → 892,8
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
intel_dp->DP |= intel_crtc->pipe << 29;
 
/* don't miss out required setting for eDP */
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= crtc->pipe << 29;
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
intel_dp->DP |= intel_dp->color_range;
 
886,22 → 906,14
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
if (intel_crtc->pipe == 1)
if (crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
 
if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
/* don't miss out required setting for eDP */
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
 
if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
if (port == PORT_A && !IS_VALLEYVIEW(dev))
ironlake_set_pll_cpu_edp(intel_dp);
}
 
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1072,8 → 1084,8
* time from now (relative to the power down delay)
* to keep the panel power up across a sequence of operations
*/
schedule_delayed_work(&intel_dp->panel_vdd_work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
// schedule_delayed_work(&intel_dp->panel_vdd_work,
// msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
}
}
 
1290,6 → 1302,7
enum pipe *pipe)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp = I915_READ(intel_dp->output_reg);
1297,9 → 1310,9
if (!(tmp & DP_PORT_EN))
return false;
 
if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
*pipe = PORT_TO_PIPE_CPT(tmp);
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
*pipe = PORT_TO_PIPE(tmp);
} else {
u32 trans_sel;
1335,9 → 1348,317
return true;
}
 
static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
tmp = I915_READ(intel_dp->output_reg);
if (tmp & DP_SYNC_HS_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
 
if (tmp & DP_SYNC_VS_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
} else {
tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
 
if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
}
 
pipe_config->adjusted_mode.flags |= flags;
 
if (dp_to_dig_port(intel_dp)->port == PORT_A) {
if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
}
}
 
static bool is_edp_psr(struct intel_dp *intel_dp)
{
return is_edp(intel_dp) &&
intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
 
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!IS_HASWELL(dev))
return false;
 
return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
}
 
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
struct edp_vsc_psr *vsc_psr)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
 
/* As per BSpec (Pipe Video Data Island Packet), we need to disable
the video DIP before programming the video DIP data buffer
registers. */
I915_WRITE(ctl_reg, 0);
POSTING_READ(ctl_reg);
 
for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
if (i < sizeof(struct edp_vsc_psr))
I915_WRITE(data_reg + i, *data++);
else
I915_WRITE(data_reg + i, 0);
}
 
I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
POSTING_READ(ctl_reg);
}
 
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_vsc_psr psr_vsc;
 
if (intel_dp->psr_setup_done)
return;
 
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
 
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
intel_dp->psr_setup_done = true;
}
 
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
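/* Enable PSR on the sink via the DP_PSR_EN_CFG DPCD register and set up
* the AUX channel parameters the PSR hardware uses to talk to the sink. */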
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
 
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
DP_PSR_ENABLE &
~DP_PSR_MAIN_LINK_ACTIVE);
else
intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
DP_PSR_ENABLE |
DP_PSR_MAIN_LINK_ACTIVE);
 
/* Setup AUX registers */
I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
I915_WRITE(EDP_PSR_AUX_CTL,
DP_AUX_CH_CTL_TIME_OUT_400us |
(msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
 
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
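/* Program the source-side EDP_PSR_CTL register: link behaviour while in
* PSR, entry timings and the enable bit. */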
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
} else
val |= EDP_PSR_LINK_DISABLE;
 
I915_WRITE(EDP_PSR_CTL, val |
EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
}
 
static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
{
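/* Check every source/panel/pipe constraint that must hold before PSR can
* be enabled; the first failing check is recorded in no_psr_reason. */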
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
if (!IS_HASWELL(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
dev_priv->no_psr_reason = PSR_NO_SOURCE;
return false;
}
 
if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
(dig_port->port != PORT_A)) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
return false;
}
 
if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
dev_priv->no_psr_reason = PSR_NO_SINK;
return false;
}
 
if (!i915_enable_psr) {
DRM_DEBUG_KMS("PSR disabled by flag\n");
dev_priv->no_psr_reason = PSR_MODULE_PARAM;
return false;
}
 
crtc = dig_port->base.base.crtc;
if (crtc == NULL) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
return false;
}
 
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
return false;
}
 
obj = to_intel_framebuffer(crtc->fb)->obj;
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
dev_priv->no_psr_reason = PSR_NOT_TILED;
return false;
}
 
if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
return false;
}
 
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
dev_priv->no_psr_reason = PSR_S3D_ENABLED;
return false;
}
 
if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
return false;
}
 
return true;
}
 
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
if (!intel_edp_psr_match_conditions(intel_dp) ||
intel_edp_is_psr_enabled(dev))
return;
 
/* Setup PSR once */
intel_edp_psr_setup(intel_dp);
 
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
 
/* Enable PSR on the host */
intel_edp_psr_enable_source(intel_dp);
}
 
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
if (intel_edp_psr_match_conditions(intel_dp) &&
!intel_edp_is_psr_enabled(dev))
intel_edp_psr_do_enable(intel_dp);
}
 
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!intel_edp_is_psr_enabled(dev))
return;
 
I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
 
void intel_edp_psr_update(struct drm_device *dev)
{
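/* Re-evaluate PSR for the eDP encoder after a display update: disable it
* if the conditions no longer hold, enable it if they now do. */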
struct intel_encoder *encoder;
struct intel_dp *intel_dp = NULL;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
if (encoder->type == INTEL_OUTPUT_EDP) {
intel_dp = enc_to_intel_dp(&encoder->base);
 
if (!is_edp_psr(intel_dp))
return;
 
if (!intel_edp_psr_match_conditions(intel_dp))
intel_edp_psr_disable(intel_dp);
else
if (!intel_edp_is_psr_enabled(dev))
intel_edp_psr_do_enable(intel_dp);
}
}
 
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
 
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
1347,7 → 1668,7
ironlake_edp_panel_off(intel_dp);
 
/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
if (!is_cpu_edp(intel_dp))
if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
intel_dp_link_down(intel_dp);
}
 
1354,9 → 1675,10
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
 
if (is_cpu_edp(intel_dp)) {
if (port == PORT_A || IS_VALLEYVIEW(dev)) {
intel_dp_link_down(intel_dp);
if (!IS_VALLEYVIEW(dev))
ironlake_edp_pll_off(intel_dp);
1383,15 → 1705,78
ironlake_edp_backlight_on(intel_dp);
}
 
static void vlv_enable_dp(struct intel_encoder *encoder)
{
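/* Intentionally empty: on VLV the port is enabled from vlv_pre_enable_dp()
* before the pipe is running. */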
}
 
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 
if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
if (dport->port == PORT_A)
ironlake_edp_pll_on(intel_dp);
}
 
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
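/* Program the VLV DPIO data lane registers for this pipe, enable the port
* and wait for the PHY to report ready. */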
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
 
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
 
mutex_unlock(&dev_priv->dpio_lock);
 
intel_enable_dp(encoder);
 
vlv_wait_port_ready(dev_priv, port);
}
 
static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int port = vlv_dport_to_channel(dport);
 
if (!IS_VALLEYVIEW(dev))
return;
 
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
 
/* Fix up inter-pair skew failure */
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
mutex_unlock(&dev_priv->dpio_lock);
}
 
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
1451,10 → 1836,13
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
else if (HAS_PCH_CPT(dev) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_1200;
else
return DP_TRAIN_VOLTAGE_SWING_800;
1464,6 → 1852,7
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (HAS_DDI(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1477,10 → 1866,22
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else if (IS_GEN7(dev) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
1502,6 → 1903,103
}
}
 
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
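/* Translate the requested voltage swing / pre-emphasis level into VLV
* DPIO PHY register values and write them out over DPIO. */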
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
int port = vlv_dport_to_channel(dport);
 
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
preemph_reg_value = 0x0004000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
demph_reg_value = 0x2B405555;
uniqtranscale_reg_value = 0x552AB83A;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
demph_reg_value = 0x2B404040;
uniqtranscale_reg_value = 0x5548B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
demph_reg_value = 0x2B245555;
uniqtranscale_reg_value = 0x5560B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_1200:
demph_reg_value = 0x2B405555;
uniqtranscale_reg_value = 0x5598DA3A;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_3_5:
preemph_reg_value = 0x0002000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
demph_reg_value = 0x2B404040;
uniqtranscale_reg_value = 0x5552B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
demph_reg_value = 0x2B404848;
uniqtranscale_reg_value = 0x5580B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
demph_reg_value = 0x2B404040;
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_6:
preemph_reg_value = 0x0000000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
demph_reg_value = 0x2B305555;
uniqtranscale_reg_value = 0x5570B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
demph_reg_value = 0x2B2B4040;
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_9_5:
preemph_reg_value = 0x0006000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
demph_reg_value = 0x1B405555;
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
return 0;
}
break;
default:
return 0;
}
 
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
uniqtranscale_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
mutex_unlock(&dev_priv->dpio_lock);
 
return 0;
}
 
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
1669,6 → 2167,7
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
1676,10 → 2175,13
if (HAS_DDI(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev)) {
signal_levels = intel_vlv_signal_levels(intel_dp);
mask = 0;
} else if (IS_GEN7(dev) && port == PORT_A) {
signal_levels = intel_gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
} else if (IS_GEN6(dev) && port == PORT_A) {
signal_levels = intel_gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
1729,8 → 2231,7
}
I915_WRITE(DP_TP_CTL(port), temp);
 
} else if (HAS_PCH_CPT(dev) &&
(IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1828,7 → 2329,6
struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
 
1846,7 → 2346,6
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
clock_recovery = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
1867,7 → 2366,6
 
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
}
 
1981,6 → 2479,7
intel_dp_link_down(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
2010,7 → 2509,7
 
DRM_DEBUG_KMS("\n");
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
2072,6 → 2571,13
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
 
/* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
if (is_edp_psr(intel_dp))
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
2301,11 → 2807,10
return NULL;
 
size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
edid = kmalloc(size, GFP_KERNEL);
edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
if (!edid)
return NULL;
 
memcpy(edid, intel_connector->edid, size);
return edid;
}
 
2340,6 → 2845,9
enum drm_connector_status status;
struct edid *edid = NULL;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
 
intel_dp->has_audio = false;
 
if (HAS_PCH_SPLIT(dev))
2499,15 → 3007,16
}
 
static void
intel_dp_destroy(struct drm_connector *connector)
intel_dp_connector_destroy(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
 
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
 
if (is_edp(intel_dp))
/* Can't call is_edp() since the encoder may have been destroyed
* already. */
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_fini(&intel_connector->panel);
 
drm_sysfs_connector_remove(connector);
2532,16 → 3041,12
kfree(intel_dig_port);
}
 
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.mode_set = intel_dp_mode_set,
};
 
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
.destroy = intel_dp_destroy,
.destroy = intel_dp_connector_destroy,
};
 
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
2588,11 → 3093,11
struct child_device_config *p_child;
int i;
 
if (!dev_priv->child_dev_num)
if (!dev_priv->vbt.child_dev_num)
return false;
 
for (i = 0; i < dev_priv->child_dev_num; i++) {
p_child = dev_priv->child_dev + i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
 
if (p_child->dvo_port == PORT_IDPD &&
p_child->device_type == DEVICE_TYPE_eDP)
2670,7 → 3175,7
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
 
vbt = dev_priv->edp.pps;
vbt = dev_priv->vbt.edp_pps;
 
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
2738,9 → 3243,6
pp_div_reg = PIPEA_PP_DIVISOR;
}
 
if (IS_VALLEYVIEW(dev))
port_sel = I915_READ(pp_on_reg) & 0xc0000000;
 
/* And finally store the new values in the power sequencer. */
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
2754,8 → 3256,10
 
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (is_cpu_edp(intel_dp))
if (IS_VALLEYVIEW(dev)) {
port_sel = I915_READ(pp_on_reg) & 0xc0000000;
} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (dp_to_dig_port(intel_dp)->port == PORT_A)
port_sel = PANEL_POWER_PORT_DP_A;
else
port_sel = PANEL_POWER_PORT_DP_D;
2773,7 → 3277,85
I915_READ(pp_div_reg));
}
 
void
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
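/* eDP-only connector setup: read DPCD and EDID with VDD forced on, program
* the panel power sequencer and pick a fixed panel mode (EDID preferred,
* VBT as fallback). Returns false if the panel appears to be a ghost. */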
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
struct edp_power_seq power_seq = { 0 };
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
 
if (!is_edp(intel_dp))
return true;
 
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
 
/* Cache DPCD and EDID for edp. */
ironlake_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
 
if (has_dpcd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
return false;
}
 
/* We now know it's not a ghost, init power sequence regs. */
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
 
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
edid);
drm_edid_to_eld(connector, edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
}
} else {
edid = ERR_PTR(-ENOENT);
}
intel_connector->edid = edid;
 
/* prefer fixed mode from EDID if available */
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
break;
}
}
 
/* fallback to VBT if available for eDP */
if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev,
dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
 
ironlake_edp_panel_vdd_off(intel_dp, false);
 
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
 
return true;
}
 
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
2782,38 → 3364,47
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
struct edp_power_seq power_seq = { 0 };
enum port port = intel_dig_port->port;
const char *name = NULL;
int type;
int type, error;
 
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
 
if (HAS_PCH_SPLIT(dev) && port == PORT_D)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
 
type = DRM_MODE_CONNECTOR_DisplayPort;
/*
* FIXME: We need to initialize built-in panels before external panels.
* For X0, DP_C is fixed as eDP. Revisit this as part of the VLV eDP cleanup.
*/
if (IS_VALLEYVIEW(dev) && port == PORT_C) {
switch (port) {
case PORT_A:
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else if (port == PORT_A || is_pch_edp(intel_dp)) {
break;
case PORT_C:
if (IS_VALLEYVIEW(dev))
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
* DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
* rewrite it.
*/
type = DRM_MODE_CONNECTOR_DisplayPort;
break;
case PORT_D:
if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
type = DRM_MODE_CONNECTOR_eDP;
break;
default: /* silence GCC warning */
break;
}
 
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
* for DP the encoder type can be set by the caller to
* INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
*/
if (type == DRM_MODE_CONNECTOR_eDP)
intel_encoder->type = INTEL_OUTPUT_EDP;
 
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
port_name(port));
 
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
2873,76 → 3464,25
BUG();
}
 
if (is_edp(intel_dp))
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
error = intel_dp_i2c_init(intel_dp, intel_connector, name);
WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
error, port_name(port));
 
intel_dp_i2c_init(intel_dp, intel_connector, name);
intel_dp->psr_setup_done = false;
 
/* Cache DPCD and EDID for edp. */
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
i2c_del_adapter(&intel_dp->adapter);
if (is_edp(intel_dp)) {
bool ret;
struct drm_display_mode *scan;
struct edid *edid;
 
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
 
if (ret) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
intel_dp_encoder_destroy(&intel_encoder->base);
intel_dp_destroy(connector);
return;
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
 
/* We now know it's not a ghost, init power sequence regs. */
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
 
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_edid_to_eld(connector, edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
return false;
}
} else {
edid = ERR_PTR(-ENOENT);
}
intel_connector->edid = edid;
 
/* prefer fixed mode from EDID if available */
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
break;
}
}
 
/* fallback to VBT if available for eDP */
if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (fixed_mode)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
 
ironlake_edp_panel_vdd_off(intel_dp, false);
}
 
if (is_edp(intel_dp)) {
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
}
 
intel_dp_add_properties(intel_dp, connector);
 
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
2953,6 → 3493,8
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
 
return true;
}
 
void
2978,14 → 3520,21
 
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
 
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->enable = intel_enable_dp;
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->mode_set = intel_dp_mode_set;
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
} else {
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->enable = intel_enable_dp;
}
 
intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
2995,5 → 3544,9
intel_encoder->cloneable = false;
intel_encoder->hot_plug = intel_dp_hot_plug;
 
intel_dp_init_connector(intel_dig_port, intel_connector);
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
kfree(intel_connector);
}
}
/drivers/video/drm/i915/intel_drv.h
26,6 → 26,7
#define __INTEL_DRV_H__
 
#include <linux/i2c.h>
#include <linux/hdmi.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_crtc.h>
64,29 → 65,11
ret__; \
})
 
#define wait_for_atomic_us(COND, US) ({ \
unsigned long timeout__ = GetTimerTicks() + usecs_to_jiffies(US); \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(GetTimerTicks(), timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
} \
cpu_relax(); \
} \
ret__; \
})
 
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
#define wait_for_atomic_us(COND, US) _wait_for((COND), \
DIV_ROUND_UP((US), 1000), 0)
 
#define MSLEEP(x) do { \
if (in_dbg_master()) \
mdelay(x); \
else \
msleep(x); \
} while(0)
 
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
 
143,7 → 126,6
struct intel_crtc *new_crtc;
 
int type;
bool needs_tv_clock;
/*
* Intel hw has only one MUX where encoders could be clone, hence a
* simple flag is enough to compute the possible_clones mask.
163,6 → 145,12
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
/* Reconstructs the equivalent mode flags for the current hardware
* state. This must be called _after_ display->get_pipe_config has
* pre-filled the pipe config. Note that intel_encoder->base.crtc must
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_config *pipe_config);
int crtc_mask;
enum hpd_pin hpd_pin;
};
200,13 → 188,32
u8 polled;
};
 
typedef struct dpll {
/* given values */
int n;
int m1, m2;
int p1, p2;
/* derived values */
int dot;
int vco;
int m;
int p;
} intel_clock_t;
 
struct intel_crtc_config {
/**
* quirks - bitfield with hw state readout quirks
*
* For various reasons the hw state readout code might not be able to
* completely faithfully read out the current state. These cases are
* tracked with quirk flags so that fastboot and state checker can act
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
 
struct drm_display_mode requested_mode;
struct drm_display_mode adjusted_mode;
/* This flag must be set by the encoder's compute_config callback if it
* changes the crtc timings in the mode to prevent the crtc fixup from
* overwriting them. Currently only lvds needs that. */
bool timings_set;
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
224,29 → 231,68
/* DP has a bunch of special case unfortunately, so mark the pipe
* accordingly. */
bool has_dp_encoder;
 
/*
* Enable dithering, used when the selected pipe bpp doesn't match the
* plane bpp.
*/
bool dither;
 
/* Controls for the clock computation, to override various stages. */
bool clock_set;
 
/* SDVO TV has a bunch of special case. To make multifunction encoders
* work correctly, we need to track this at runtime.*/
bool sdvo_tv_clock;
 
/*
* crtc bandwidth limit, don't increase pipe bpp or clock if not really
* required. This is set in the 2nd loop of calling encoder's
* ->compute_config if the first pick doesn't work out.
*/
bool bw_constrained;
 
/* Settings for the intel dpll used on pretty much everything but
* haswell. */
struct dpll {
unsigned n;
unsigned m1, m2;
unsigned p1, p2;
} dpll;
struct dpll dpll;
 
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
 
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
 
int pipe_bpp;
struct intel_link_m_n dp_m_n;
/**
* This is currently used by DP and HDMI encoders since those can have a
* target pixel clock != the port link clock (which is currently stored
* in adjusted_mode->clock).
 
/*
* Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode.
*/
int pixel_target_clock;
int port_clock;
 
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
 
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
u32 pgm_ratios;
u32 lvds_border_bits;
} gmch_pfit;
 
/* Panel fitter placement and size for Ironlake+ */
struct {
u32 pos;
u32 size;
bool enabled;
} pch_pfit;
 
/* FDI configuration, only valid if has_pch_encoder is set. */
int fdi_lanes;
struct intel_link_m_n fdi_m_n;
 
bool ips_enabled;
};
 
struct intel_crtc {
265,7 → 311,6
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
int fdi_lanes;
 
atomic_t unpin_work_count;
 
282,14 → 327,23
 
struct intel_crtc_config config;
 
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
uint32_t ddi_pll_sel;
 
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
 
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
};
 
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
};
 
struct intel_plane {
struct drm_plane base;
int plane;
302,7 → 356,16
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
 
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
* as the other pieces of the struct may not reflect the values we want
* for the watermark calculations. Currently only Haswell uses this.
*/
struct intel_plane_wm_parameters wm;
 
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
309,7 → 372,8
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
341,66 → 405,6
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
 
#define DIP_HEADER_SIZE 5
 
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
 
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
#define DIP_LEN_SPD 25
#define DIP_SPD_UNKNOWN 0
#define DIP_SPD_DSTB 0x1
#define DIP_SPD_DVDP 0x2
#define DIP_SPD_DVHS 0x3
#define DIP_SPD_HDDVR 0x4
#define DIP_SPD_DVC 0x5
#define DIP_SPD_DSC 0x6
#define DIP_SPD_VCD 0x7
#define DIP_SPD_GAME 0x8
#define DIP_SPD_PC 0x9
#define DIP_SPD_BD 0xa
#define DIP_SPD_SCD 0xb
 
struct dip_infoframe {
uint8_t type; /* HB0 */
uint8_t ver; /* HB1 */
uint8_t len; /* HB2 - body len, not including checksum */
uint8_t ecc; /* Header ECC */
uint8_t checksum; /* PB0 */
union {
struct {
/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
uint8_t Y_A_B_S;
/* PB2 - C 7:6, M 5:4, R 3:0 */
uint8_t C_M_R;
/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
} __attribute__ ((packed)) avi;
struct {
uint8_t vn[8];
uint8_t pd[16];
uint8_t sdi;
} __attribute__ ((packed)) spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
 
struct intel_hdmi {
u32 hdmi_reg;
int ddc_bus;
411,7 → 415,8
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
431,10 → 436,10
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
443,6 → 448,7
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
bool psr_setup_done;
struct intel_connector *attached_connector;
};
 
449,11 → 455,24
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 port_reversal;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
 
static inline int
vlv_dport_to_channel(struct intel_digital_port *dport)
{
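/* VLV wires digital port B to DPIO channel 0 and port C to channel 1. */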
switch (dport->port) {
case PORT_B:
return 0;
case PORT_C:
return 1;
default:
BUG();
}
}
 
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
481,13 → 500,6
bool enable_stall_check;
};
 
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int interval;
};
 
int intel_pch_rawclk(struct drm_device *dev);
 
int intel_connector_update_modes(struct drm_connector *connector,
497,6 → 509,7
extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int hdmi_reg, enum port port);
505,19 → 518,19
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
extern void intel_mark_idle(struct drm_device *dev);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
535,7 → 548,6
extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
545,14 → 557,16
struct drm_display_mode *fixed_mode);
extern void intel_panel_fini(struct intel_panel *panel);
 
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
extern void intel_panel_set_backlight(struct drm_device *dev,
u32 level, u32 max);
extern int intel_panel_setup_backlight(struct drm_connector *connector);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
568,19 → 582,15
bool mode_changed;
};
 
extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
extern void intel_connector_dpms(struct drm_connector *, int mode);
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
extern void intel_modeset_check_state(struct drm_device *dev);
extern void intel_plane_restore(struct drm_plane *plane);
extern void intel_plane_disable(struct drm_plane *plane);
 
 
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
588,13 → 598,6
return to_intel_connector(connector)->encoder;
}
 
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
container_of(encoder, struct intel_digital_port, base.base);
return &intel_dig_port->dp;
}
 
static inline struct intel_digital_port *
enc_to_dig_port(struct drm_encoder *encoder)
{
601,6 → 604,11
return container_of(encoder, struct intel_digital_port, base.base);
}
 
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
}
 
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
630,6 → 638,7
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
 
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
642,12 → 651,10
extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
 
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
 
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
658,6 → 665,7
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
677,6 → 685,22
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
 
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
683,13 → 707,9
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 
extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_suspend_hw(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
696,11 → 716,10
 
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size);
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
extern void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
 
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
712,25 → 731,27
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
 
/* Power-related functions, located in intel_pm.c */
extern void intel_init_pm(struct drm_device *dev);
/* FBC */
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
 
extern bool intel_using_power_well(struct drm_device *dev);
/* Power well */
extern int i915_init_power_well(struct drm_device *dev);
extern void i915_remove_power_well(struct drm_device *dev);
 
extern bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
 
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
742,7 → 763,7
extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
751,5 → 772,31
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 
extern void intel_display_handle_reset(struct drm_device *dev);
extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable);
extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
 
extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
extern void intel_edp_psr_update(struct drm_device *dev);
extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down);
extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
uint32_t mask);
extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
uint32_t mask);
extern void hsw_enable_pc8_work(struct work_struct *__work);
extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
 
#endif /* __INTEL_DRV_H__ */
/drivers/video/drm/i915/intel_dvo.c
54,6 → 54,13
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.slave_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
.dvo_reg = DVOA,
93,15 → 100,14
bool panel_wants_dither;
};
 
static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_dvo, base.base);
return container_of(encoder, struct intel_dvo, base);
}
 
static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_dvo, base);
return enc_to_dvo(intel_attached_encoder(connector));
}
 
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
116,7 → 122,7
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
 
tmp = I915_READ(intel_dvo->dev.dvo_reg);
129,10 → 135,30
return true;
}
 
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
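/* Read the DVO port sync polarity back into the pipe config flags. */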
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
 
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (tmp & DVO_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
}
 
static void intel_disable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
 
144,7 → 170,7
static void intel_enable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
 
153,6 → 179,7
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
 
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
174,6 → 201,8
return;
}
 
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) {
intel_dvo->base.connectors_active = true;
 
211,11 → 240,11
return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
 
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static bool intel_dvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
234,26 → 263,28
C(vtotal);
C(clock);
#undef C
 
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
 
if (intel_dvo->dev.dev_ops->mode_fixup)
return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
&pipe_config->requested_mode,
adjusted_mode);
 
return true;
}
 
static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_dvo_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
int pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
int dpll_reg = DPLL(pipe);
 
switch (dvo_reg) {
case DVOA:
268,7 → 299,9
break;
}
 
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&crtc->config.requested_mode,
adjusted_mode);
 
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
284,8 → 317,6
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
 
I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
 
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
305,6 → 336,8
intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
 
342,11 → 375,6
kfree(connector);
}
 
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.mode_fixup = intel_dvo_mode_fixup,
.mode_set = intel_dvo_mode_set,
};
 
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
362,7 → 390,7
 
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
 
if (intel_dvo->dev.dev_ops->destroy)
intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
440,6 → 468,9
intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo;
intel_encoder->get_hw_state = intel_dvo_get_hw_state;
intel_encoder->get_config = intel_dvo_get_config;
intel_encoder->compute_config = intel_dvo_compute_config;
intel_encoder->mode_set = intel_dvo_mode_set;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
 
/* Now, try to find a controller */
506,9 → 537,6
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
 
drm_encoder_helper_add(&intel_encoder->base,
&intel_dvo_helper_funcs);
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
/drivers/video/drm/i915/intel_fb.c
94,8 → 94,9
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
struct drm_device *dev = ifbdev->helper.dev;
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
148,8 → 149,6
lfb_vm_node.start = 0;
lfb_vm_node.mm = NULL;
 
obj->gtt_space = &lfb_vm_node;
obj->gtt_offset = 0;
obj->pin_count = 2;
obj->cache_level = I915_CACHE_NONE;
obj->base.write_domain = 0;
164,7 → 163,7
goto out_unpin;
}
 
info->par = ifbdev;
info->par = helper;
 
ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
189,13 → 188,12
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
 
info->screen_base = (void*) 0xFE000000;
info->screen_size = size;
 
// memset(info->screen_base, 0, size);
 
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
202,9 → 200,9
 
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
obj->gtt_offset, obj);
i915_gem_obj_ggtt_offset(obj), obj);
 
 
mutex_unlock(&dev->struct_mutex);
232,7 → 230,7
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
257,7 → 255,7
 
void intel_fbdev_initial_config(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Due to peculiar init order wrt to hpd handling this is separate. */
drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
265,6 → 263,6
 
void intel_fb_output_poll_changed(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
/drivers/video/drm/i915/intel_hdmi.c
29,6 → 29,7
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
66,89 → 67,83
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
 
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
{
uint8_t *data = (uint8_t *)frame;
uint8_t sum = 0;
unsigned i;
 
frame->checksum = 0;
frame->ecc = 0;
 
for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
sum += data[i];
 
frame->checksum = 0x100 - sum;
}
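For reference, a simplified standalone sketch (illustrative helper, not driver code, and it does not reproduce the struct dip_infoframe layout) of the checksum rule the removed helper implemented and that hdmi_infoframe_pack() now applies: with the checksum byte zeroed, all header and payload bytes are summed and the checksum is chosen so the complete frame sums to zero modulo 256.

#include <stdint.h>
#include <stddef.h>

/* Sketch of the HDMI infoframe checksum rule: sum every byte with the
 * checksum field zeroed, then store 0x100 - sum so the whole frame sums
 * to zero modulo 256. */
static uint8_t infoframe_checksum(const uint8_t *data, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += data[i];

	return (uint8_t)(0x100 - sum);	/* wraps to 0 when sum is already 0 */
}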
 
static u32 g4x_infoframe_index(struct dip_infoframe *frame)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_SELECT_AVI;
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_SELECT_SPD;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_SELECT_VENDOR;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
 
static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VENDOR;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
 
static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD_HSW;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VS_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
 
static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
enum transcoder cpu_transcoder)
{
switch (frame->type) {
case DIP_TYPE_AVI:
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
case DIP_TYPE_SPD:
case HDMI_INFOFRAME_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
case HDMI_INFOFRAME_TYPE_VENDOR:
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
 
static void g4x_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i;
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
 
val &= ~g4x_infoframe_enable(frame);
val &= ~g4x_infoframe_enable(type);
 
I915_WRITE(VIDEO_DIP_CTL, val);
 
162,7 → 157,7
I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
 
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
 
171,22 → 166,22
}
 
static void ibx_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
 
val &= ~g4x_infoframe_enable(frame);
val &= ~g4x_infoframe_enable(type);
 
I915_WRITE(reg, val);
 
200,7 → 195,7
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
 
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
 
209,25 → 204,25
}
 
static void cpt_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
 
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
if (frame->type != DIP_TYPE_AVI)
val &= ~g4x_infoframe_enable(frame);
if (type != HDMI_INFOFRAME_TYPE_AVI)
val &= ~g4x_infoframe_enable(type);
 
I915_WRITE(reg, val);
 
241,7 → 236,7
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
 
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
 
250,22 → 245,22
}
 
static void vlv_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val |= g4x_infoframe_index(type);
 
val &= ~g4x_infoframe_enable(frame);
val &= ~g4x_infoframe_enable(type);
 
I915_WRITE(reg, val);
 
279,7 → 274,7
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
 
val |= g4x_infoframe_enable(frame);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
 
288,7 → 283,8
}
 
static void hsw_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
295,14 → 291,16
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
unsigned int i, len = DIP_HEADER_SIZE + frame->len;
u32 data_reg;
int i;
u32 val = I915_READ(ctl_reg);
 
data_reg = hsw_infoframe_data_reg(type,
intel_crtc->config.cpu_transcoder);
if (data_reg == 0)
return;
 
val &= ~hsw_infoframe_enable(frame);
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
 
mmiowb();
315,18 → 313,48
I915_WRITE(data_reg + i, 0);
mmiowb();
 
val |= hsw_infoframe_enable(frame);
val |= hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
POSTING_READ(ctl_reg);
}
 
static void intel_set_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
* at 0). It's also a byte used by DisplayPort so the same DIP registers can be
* used for both technologies.
*
* DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
* DW1: DB3 | DB2 | DB1 | DB0
* DW2: DB7 | DB6 | DB5 | DB4
* DW3: ...
*
* (HB is Header Byte, DB is Data Byte)
*
* The hdmi pack() functions don't know about that hardware-specific hole so we
* trick them by giving an offset into the buffer and moving back the header
* bytes by one.
*/
static void intel_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
uint8_t buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
 
intel_dip_infoframe_csum(frame);
intel_hdmi->write_infoframe(encoder, frame);
/* see comment above for the reason for this offset */
len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
if (len < 0)
return;
 
/* Insert the 'hole' (see big comment above) at position 3 */
buffer[0] = buffer[1];
buffer[1] = buffer[2];
buffer[2] = buffer[3];
buffer[3] = 0;
len++;
 
intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
}
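To make the buffer shuffle above easier to follow, here is a minimal standalone sketch (hypothetical helper name, not part of the driver) that builds the same layout directly: the three header bytes first, a zero reserved/ECC byte at offset 3, then the checksum and data bytes, which is what the buffer+1 packing trick achieves in place.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Sketch only: rebuild a packed infoframe (HB0 HB1 HB2 | checksum | data...)
 * with the reserved/ECC hole the DIP data registers expect at offset 3. */
static void example_insert_dip_hole(const uint8_t *packed, size_t packed_len,
				    uint8_t *out /* packed_len + 1 bytes */)
{
	out[0] = packed[0];	/* HB0 */
	out[1] = packed[1];	/* HB1 */
	out[2] = packed[2];	/* HB2 */
	out[3] = 0;		/* reserved/ECC/DP byte */
	memcpy(&out[4], &packed[3], packed_len - 3);	/* checksum + data bytes */
}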
 
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
334,42 → 362,59
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
union hdmi_infoframe frame;
int ret;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
}
 
if (intel_hdmi->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_FULL;
}
 
avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
 
intel_set_infoframe(encoder, &avi_if);
intel_write_infoframe(encoder, &frame);
}
 
static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
union hdmi_infoframe frame;
int ret;
 
memset(&spd_if, 0, sizeof(spd_if));
spd_if.type = DIP_TYPE_SPD;
spd_if.ver = DIP_VERSION_SPD;
spd_if.len = DIP_LEN_SPD;
strcpy(spd_if.body.spd.vn, "Intel");
strcpy(spd_if.body.spd.pd, "Integrated gfx");
spd_if.body.spd.sdi = DIP_SPD_PC;
ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
if (ret < 0) {
DRM_ERROR("couldn't fill SPD infoframe\n");
return;
}
 
intel_set_infoframe(encoder, &spd_if);
frame.spd.sdi = HDMI_SPD_SDI_PC;
 
intel_write_infoframe(encoder, &frame);
}
 
static void
intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
union hdmi_infoframe frame;
int ret;
 
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
adjusted_mode);
if (ret < 0)
return;
 
intel_write_infoframe(encoder, &frame);
}
 
static void g4x_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
432,6 → 477,7
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
 
static void ibx_set_infoframes(struct drm_encoder *encoder,
493,6 → 539,7
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
 
static void cpt_set_infoframes(struct drm_encoder *encoder,
528,6 → 575,7
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
 
static void vlv_set_infoframes(struct drm_encoder *encoder,
562,6 → 610,7
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
 
static void hsw_set_infoframes(struct drm_encoder *encoder,
589,20 → 638,20
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
 
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_hdmi_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 hdmi_val;
 
hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
if (!HAS_PCH_SPLIT(dev))
hdmi_val |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
609,7 → 658,7
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
 
if (intel_crtc->config.pipe_bpp > 24)
if (crtc->config.pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
620,21 → 669,21
 
if (intel_hdmi->has_audio) {
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
hdmi_val |= SDVO_AUDIO_ENABLE;
hdmi_val |= HDMI_MODE_SELECT_HDMI;
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
 
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
 
I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
POSTING_READ(intel_hdmi->hdmi_reg);
 
intel_hdmi->set_infoframes(encoder, adjusted_mode);
intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
 
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
658,6 → 707,28
return true;
}
 
static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp, flags = 0;
 
tmp = I915_READ(intel_hdmi->hdmi_reg);
 
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
 
if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
}
 
static void intel_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
699,6 → 770,10
}
}
 
static void vlv_enable_hdmi(struct intel_encoder *encoder)
{
}
 
static void intel_disable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
755,10 → 830,22
}
}
 
static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
if (IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev))
return 300000;
else
return 225000;
}
 
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->clock > 165000)
if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
775,6 → 862,9
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int desired_bpp;
 
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
794,16 → 884,31
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
* outputs.
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
pipe_config->pipe_bpp = 12*3;
if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
&& HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
 
/* Need to adjust the port link by 1.5x for 12bpc. */
pipe_config->port_clock = clock_12bpc;
} else {
DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
pipe_config->pipe_bpp = 8*3;
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
}
 
if (!pipe_config->bw_constrained) {
DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
pipe_config->pipe_bpp = desired_bpp;
}
 
if (adjusted_mode->clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
 
return true;
}
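As an illustrative aside (the numbers below are examples, not from the patch): the clock_12bpc value above scales the requested pixel clock by 3/2 because 12 bpc carries 1.5x as many bits per pixel as 8 bpc, and the result must still fit under hdmi_portclock_limit().

#include <stdbool.h>
#include <stdio.h>

/* Worked example of the 12 bpc port clock check (illustrative values). */
int main(void)
{
	int requested_clock = 148500;			/* kHz: 1920x1080@60 pixel clock */
	int clock_12bpc = requested_clock * 3 / 2;	/* 222750 kHz: 1.5x the 8 bpc rate */
	int portclock_limit = 225000;			/* kHz: non-G4X, non-HSW limit */
	bool fits = clock_12bpc <= portclock_limit;

	printf("12bpc needs %d kHz, %s the %d kHz limit\n",
	       clock_12bpc, fits ? "within" : "over", portclock_limit);
	return 0;
}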
 
819,6 → 924,9
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
 
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
955,6 → 1063,105
return 0;
}
 
static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
 
if (!IS_VALLEYVIEW(dev))
return;
 
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
 
/* HDMI 1.0V-2dB */
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
0x2b245f5f);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
0x5578b83a);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
0x0c782040);
vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
0x2b247878);
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
0x00002000);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
 
/* Program lane clock */
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
 
intel_enable_hdmi(encoder);
 
vlv_wait_port_ready(dev_priv, port);
}
 
static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int port = vlv_dport_to_channel(dport);
 
if (!IS_VALLEYVIEW(dev))
return;
 
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
 
/* Fix up inter-pair skew failure */
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
 
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
0x00002000);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void intel_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int port = vlv_dport_to_channel(dport);
 
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void intel_hdmi_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
962,10 → 1169,6
kfree(connector);
}
 
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_set = intel_hdmi_mode_set,
};
 
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
1070,7 → 1273,6
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
 
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
1084,16 → 1286,23
}
 
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
 
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
 
intel_encoder->compute_config = intel_hdmi_compute_config;
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->mode_set = intel_hdmi_mode_set;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = intel_hdmi_post_disable;
} else {
intel_encoder->enable = intel_enable_hdmi;
}
 
intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
/drivers/video/drm/i915/intel_i2c.c
398,6 → 398,7
int i, reg_offset;
int ret = 0;
 
intel_aux_display_runtime_get(dev_priv);
mutex_lock(&dev_priv->gmbus_mutex);
 
if (bus->force_bit) {
497,6 → 498,7
 
out:
mutex_unlock(&dev_priv->gmbus_mutex);
intel_aux_display_runtime_put(dev_priv);
return ret;
}
 
/drivers/video/drm/i915/intel_lvds.c
49,8 → 49,6
struct intel_lvds_encoder {
struct intel_encoder base;
 
u32 pfit_control;
u32 pfit_pgm_ratios;
bool is_dual_link;
u32 reg;
 
88,21 → 86,61
return true;
}
 
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp, flags = 0;
 
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
 
tmp = I915_READ(lvds_reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
flags |= DRM_MODE_FLAG_PHSYNC;
if (tmp & LVDS_VSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NVSYNC;
else
flags |= DRM_MODE_FLAG_PVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
 
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
tmp = I915_READ(PFIT_CONTROL);
 
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
}
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *fixed_mode =
lvds_encoder->attached_connector->base.panel.fixed_mode;
int pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode =
&crtc->config.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
 
if (HAS_PCH_SPLIT(dev)) {
assert_fdi_rx_pll_disabled(dev_priv, pipe);
assert_shared_dpll_disabled(dev_priv,
intel_crtc_to_shared_dpll(crtc));
} else {
assert_pll_disabled(dev_priv, pipe);
}
 
temp = I915_READ(lvds_encoder->reg);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
118,7 → 156,8
}
 
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
temp &= ~LVDS_BORDER_ENABLE;
temp |= crtc->config.gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
136,43 → 175,22
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg. */
if (INTEL_INFO(dev)->gen == 4) {
if (dev_priv->lvds_dither)
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
if (crtc->config.dither && crtc->config.pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
}
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
 
I915_WRITE(lvds_encoder->reg, temp);
}
 
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
return;
 
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
* adjusted whilst the pipe is disabled, according to
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
enc->pfit_control,
enc->pfit_pgm_ratios);
 
I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, enc->pfit_control);
}
 
/**
* Sets the power state for the panel.
*/
241,62 → 259,6
return MODE_OK;
}
 
static void
centre_horizontally(struct drm_display_mode *mode,
int width)
{
u32 border, sync_pos, blank_width, sync_width;
 
/* keep the hsync and hblank widths constant */
sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
 
border = (mode->hdisplay - width + 1) / 2;
border += border & 1; /* make the border even */
 
mode->crtc_hdisplay = width;
mode->crtc_hblank_start = width + border;
mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
 
mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
}
 
static void
centre_vertically(struct drm_display_mode *mode,
int height)
{
u32 border, sync_pos, blank_width, sync_width;
 
/* keep the vsync and vblank widths constant */
sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
 
border = (mode->vdisplay - height + 1) / 2;
 
mode->crtc_vdisplay = height;
mode->crtc_vblank_start = height + border;
mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
 
mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
}
 
static inline u32 panel_fitter_scaling(u32 source, u32 target)
{
/*
* Floating point operation is not supported. So the FACTOR
* is defined, which can avoid the floating point computation
* when calculating the panel ratio.
*/
#define ACCURACY 12
#define FACTOR (1 << ACCURACY)
u32 ratio = source * FACTOR / target;
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
 
static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_config *pipe_config)
{
307,11 → 269,8
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
unsigned int lvds_bpp;
int pipe;
 
/* Should never happen!! */
if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
319,9 → 278,6
return false;
}
 
if (intel_encoder_check_is_cloned(&lvds_encoder->base))
return false;
 
if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
LVDS_A3_POWER_UP)
lvds_bpp = 8*3;
328,11 → 284,12
else
lvds_bpp = 6*3;
 
if (lvds_bpp != pipe_config->pipe_bpp) {
if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
pipe_config->pipe_bpp, lvds_bpp);
pipe_config->pipe_bpp = lvds_bpp;
}
 
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
345,140 → 302,15
if (HAS_PCH_SPLIT(dev)) {
pipe_config->has_pch_encoder = true;
 
intel_pch_panel_fitting(dev,
intel_connector->panel.fitting_mode,
mode, adjusted_mode);
return true;
}
 
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
 
/* 965+ wants fuzzy fitting */
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
 
/*
* Enable automatic panel scaling for non-native modes so that they fill
* the screen. Should be enabled before the pipe is enabled, according
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
for_each_pipe(pipe)
I915_WRITE(BCLRPAT(pipe), 0);
 
drm_mode_set_crtcinfo(adjusted_mode, 0);
pipe_config->timings_set = true;
 
switch (intel_connector->panel.fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
centre_horizontally(adjusted_mode, mode->hdisplay);
centre_vertically(adjusted_mode, mode->vdisplay);
border = LVDS_BORDER_ENABLE;
break;
 
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_INFO(dev)->gen >= 4) {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
 
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
else if (adjusted_mode->hdisplay != mode->hdisplay)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
} else {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
/*
* For earlier chips we have to calculate the scaling
* ratio by hand and program it into the
* PFIT_PGM_RATIO register
*/
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
 
border = LVDS_BORDER_ENABLE;
if (mode->vdisplay != adjusted_mode->vdisplay) {
u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
 
border = LVDS_BORDER_ENABLE;
if (mode->hdisplay != adjusted_mode->hdisplay) {
u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else
/* Aspects match, Let hw scale both directions */
pfit_control |= (PFIT_ENABLE |
VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
break;
 
case DRM_MODE_SCALE_FULLSCREEN:
/*
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
if (mode->vdisplay != adjusted_mode->vdisplay ||
mode->hdisplay != adjusted_mode->hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_AUTO_SCALE |
HORIZ_INTERP_BILINEAR);
}
break;
 
default:
break;
}
 
out:
/* If not enabling scaling, be consistent and always use 0. */
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
 
/* Make sure pre-965 set dither correctly */
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
 
if (pfit_control != lvds_encoder->pfit_control ||
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
lvds_encoder->pfit_control = pfit_control;
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
}
dev_priv->lvds_border_bits = border;
 
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
* user's requested refresh rate.
487,14 → 319,12
return true;
}
 
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_lvds_mode_set(struct intel_encoder *encoder)
{
/*
* The LVDS pin pair will already have been turned on in the
* intel_crtc_mode_set since it has a large impact on the DPLL
* settings.
* We don't do anything here; the LVDS port is fully set up in the
* pre-enable hook - the ordering constraints for enabling the LVDS port
* vs. enabling the display PLL are too strict.
*/
}
 
511,6 → 341,9
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
 
status = intel_panel_detect(dev);
if (status != connector_status_unknown)
return status;
672,10 → 505,6
return 0;
}
 
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
.mode_set = intel_lvds_mode_set,
};
 
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
869,6 → 698,22
DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Intel D510MO",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Intel D525MW",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
},
},
 
{ } /* terminating entry */
};
937,11 → 782,11
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
if (!dev_priv->child_dev_num)
if (!dev_priv->vbt.child_dev_num)
return true;
 
for (i = 0; i < dev_priv->child_dev_num; i++) {
struct child_device_config *child = dev_priv->child_dev + i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
struct child_device_config *child = dev_priv->vbt.child_dev + i;
 
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
979,6 → 824,19
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
return 1;
}
 
static const struct dmi_system_id intel_dual_link_lvds[] = {
{
.callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro (Core i5/i7 Series)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
},
},
{ } /* terminating entry */
};
 
bool intel_is_dual_link_lvds(struct drm_device *dev)
{
struct intel_encoder *encoder;
1016,7 → 874,7
*/
val = I915_READ(lvds_encoder->reg);
if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
val = dev_priv->bios_lvds_val;
val = dev_priv->vbt.bios_lvds_val;
 
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
1043,7 → 901,7
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
bool intel_lvds_init(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder;
1061,7 → 919,7
u8 pin;
 
if (!intel_lvds_supported(dev))
return false;
return;
 
/* Skip init on machines we know falsely report LVDS */
// if (dmi_check_system(intel_no_lvds))
1070,34 → 928,30
pin = GMBUS_PORT_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return false;
return;
}
 
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return false;
if (dev_priv->edp.support) {
return;
if (dev_priv->vbt.edp_support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return false;
return;
}
}
 
lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return false;
return;
 
lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
if (!lvds_connector) {
kfree(lvds_encoder);
return false;
return;
}
 
lvds_encoder->attached_connector = lvds_connector;
 
if (!HAS_PCH_SPLIT(dev)) {
lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
}
 
intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
intel_connector = &lvds_connector->base;
1110,10 → 964,11
 
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
intel_encoder->mode_set = intel_lvds_mode_set;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
1127,7 → 982,6
else
intel_encoder->crtc_mask = (1 << 1);
 
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
1199,11 → 1053,11
}
 
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
if (dev_priv->vbt.lfp_lvds_vbt_mode) {
DRM_DEBUG_KMS("using mode from VBT: ");
drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
 
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
1264,7 → 1118,7
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
 
return true;
return;
 
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1274,5 → 1128,5
drm_mode_destroy(dev, fixed_mode);
kfree(lvds_encoder);
kfree(lvds_connector);
return false;
return;
}
/drivers/video/drm/i915/intel_opregion.c
112,6 → 112,10
u8 rsvd[102];
} __attribute__((packed));
 
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
#define ASLE_ARDY_NOT_READY (0 << 0)
 
/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM (1 << 0)
#define ASLE_SET_BACKLIGHT (1 << 1)
125,6 → 129,12
#define ASLE_PFIT_FAILED (1<<14)
#define ASLE_PWM_FREQ_FAILED (1<<16)
 
/* Technology enabled indicator */
#define ASLE_TCHE_ALS_EN (1 << 0)
#define ASLE_TCHE_BLC_EN (1 << 1)
#define ASLE_TCHE_PFIT_EN (1 << 2)
#define ASLE_TCHE_PFMB_EN (1 << 3)
 
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
#define ASLE_BCLP_MSK (~(1<<31))
197,6 → 207,8
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
 
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
}
 
return 0;
/drivers/video/drm/i915/intel_panel.c
36,32 → 36,26
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
 
void
intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
{
adjusted_mode->hdisplay = fixed_mode->hdisplay;
adjusted_mode->hsync_start = fixed_mode->hsync_start;
adjusted_mode->hsync_end = fixed_mode->hsync_end;
adjusted_mode->htotal = fixed_mode->htotal;
drm_mode_copy(adjusted_mode, fixed_mode);
 
adjusted_mode->vdisplay = fixed_mode->vdisplay;
adjusted_mode->vsync_start = fixed_mode->vsync_start;
adjusted_mode->vsync_end = fixed_mode->vsync_end;
adjusted_mode->vtotal = fixed_mode->vtotal;
 
adjusted_mode->clock = fixed_mode->clock;
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
 
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *mode, *adjusted_mode;
int x, y, width, height;
 
mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
 
x = y = width = height = 0;
 
/* Native modes don't need fitting */
104,19 → 98,212
}
break;
 
default:
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->hdisplay;
height = adjusted_mode->vdisplay;
break;
 
default:
WARN(1, "bad panel fit mode: %d\n", fitting_mode);
return;
}
 
done:
dev_priv->pch_pf_pos = (x << 16) | y;
dev_priv->pch_pf_size = (width << 16) | height;
pipe_config->pch_pfit.pos = (x << 16) | y;
pipe_config->pch_pfit.size = (width << 16) | height;
pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
}
 
static void
centre_horizontally(struct drm_display_mode *mode,
int width)
{
u32 border, sync_pos, blank_width, sync_width;
 
/* keep the hsync and hblank widths constant */
sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
 
border = (mode->hdisplay - width + 1) / 2;
border += border & 1; /* make the border even */
 
mode->crtc_hdisplay = width;
mode->crtc_hblank_start = width + border;
mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
 
mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
}
 
static void
centre_vertically(struct drm_display_mode *mode,
int height)
{
u32 border, sync_pos, blank_width, sync_width;
 
/* keep the vsync and vblank widths constant */
sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
 
border = (mode->vdisplay - height + 1) / 2;
 
mode->crtc_vdisplay = height;
mode->crtc_vblank_start = height + border;
mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
 
mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
}
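A quick numeric illustration of the border arithmetic in centre_horizontally() above (panel and mode widths are made up): centring a 1024-wide mode on a 1280-wide panel leaves 128 border pixels on each side of the active picture, and the border count is forced even.

#include <stdio.h>

/* Border math from centre_horizontally(), with illustrative sizes. */
int main(void)
{
	int hdisplay = 1280;				/* panel's native width */
	int width = 1024;				/* mode being centred */
	int border = (hdisplay - width + 1) / 2;	/* 128 */
	border += border & 1;				/* keep the border even: still 128 */

	printf("%d active pixels, %d-pixel border on each side\n", width, border);
	return 0;
}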
 
static inline u32 panel_fitter_scaling(u32 source, u32 target)
{
/*
* Floating point operations are not supported, so FACTOR is defined
* to avoid floating point computation when calculating the panel
* ratio.
*/
#define ACCURACY 12
#define FACTOR (1 << ACCURACY)
u32 ratio = source * FACTOR / target;
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
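A worked example of the fixed-point math above (sizes are illustrative): scaling a 600-line mode onto an 800-line panel gives a ratio of 0.75, i.e. 3072 in 1/4096 units, the kind of value that ends up in the PFIT_PGM_RATIOS fields.

#include <stdio.h>
#include <stdint.h>

#define ACCURACY 12
#define FACTOR (1 << ACCURACY)

/* Same fixed-point computation as panel_fitter_scaling(), sample sizes only. */
static uint32_t scaling(uint32_t source, uint32_t target)
{
	uint32_t ratio = source * FACTOR / target;
	return (FACTOR * ratio + FACTOR / 2) / FACTOR;
}

int main(void)
{
	/* 600 source lines onto an 800-line panel: 600/800 = 0.75 = 3072/4096 */
	printf("0x%x\n", (unsigned)scaling(600, 800));	/* prints 0xc00 */
	return 0;
}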
 
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *mode, *adjusted_mode;
 
mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
 
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
 
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
centre_horizontally(adjusted_mode, mode->hdisplay);
centre_vertically(adjusted_mode, mode->vdisplay);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_INFO(dev)->gen >= 4) {
u32 scaled_width = adjusted_mode->hdisplay *
mode->vdisplay;
u32 scaled_height = mode->hdisplay *
adjusted_mode->vdisplay;
 
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
pfit_control |= PFIT_ENABLE |
PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
else if (adjusted_mode->hdisplay != mode->hdisplay)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
} else {
u32 scaled_width = adjusted_mode->hdisplay *
mode->vdisplay;
u32 scaled_height = mode->hdisplay *
adjusted_mode->vdisplay;
/*
* For earlier chips we have to calculate the scaling
* ratio by hand and program it into the
* PFIT_PGM_RATIO register
*/
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode,
scaled_height /
mode->vdisplay);
 
border = LVDS_BORDER_ENABLE;
if (mode->vdisplay != adjusted_mode->vdisplay) {
u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
scaled_width /
mode->hdisplay);
 
border = LVDS_BORDER_ENABLE;
if (mode->hdisplay != adjusted_mode->hdisplay) {
u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else {
/* Aspects match, let hw scale both directions */
pfit_control |= (PFIT_ENABLE |
VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
}
break;
case DRM_MODE_SCALE_FULLSCREEN:
/*
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
if (mode->vdisplay != adjusted_mode->vdisplay ||
mode->hdisplay != adjusted_mode->hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_AUTO_SCALE |
HORIZ_INTERP_BILINEAR);
}
break;
default:
WARN(1, "bad panel fit mode: %d\n", fitting_mode);
return;
}
 
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
 
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
 
/* Make sure pre-965 set dither correctly for 18bpp panels. */
if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
 
pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
}
 
static int is_backlight_combination_mode(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
130,11 → 317,16
return 0;
}
 
/* XXX: query mode clock or hardware clock and program max PWM appropriately
* when it's 0.
*/
static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
// WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
 
/* Restore the CTL value if it was lost, e.g. after a GPU reset */
 
if (HAS_PCH_SPLIT(dev_priv->dev)) {
164,7 → 356,7
return val;
}
 
static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
static u32 intel_panel_get_max_backlight(struct drm_device *dev)
{
u32 max;
 
182,23 → 374,8
max *= 0xff;
}
 
return max;
}
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
 
u32 intel_panel_get_max_backlight(struct drm_device *dev)
{
u32 max;
 
max = _intel_panel_get_max_backlight(dev);
if (max == 0) {
/* XXX add code here to query mode clock or hardware clock
* and program max PWM appropriately.
*/
pr_warn_once("fixme: max PWM is zero\n");
return 1;
}
 
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
return max;
}
 
217,8 → 394,11
return val;
 
if (i915_panel_invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
return intel_panel_get_max_backlight(dev) - val;
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
u32 max = intel_panel_get_max_backlight(dev);
if (max)
return max - val;
}
 
return val;
}
227,7 → 407,10
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
if (HAS_PCH_SPLIT(dev)) {
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
244,6 → 427,9
}
 
val = intel_panel_compute_brightness(dev, val);
 
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
270,6 → 456,10
u32 max = intel_panel_get_max_backlight(dev);
u8 lbpc;
 
/* we're screwed, but keep behaviour backwards compatible */
if (!max)
max = 1;
 
lbpc = level * 0xfe / max + 1;
level /= lbpc;
pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
282,7 → 472,8
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
 
void intel_panel_set_backlight(struct drm_device *dev, u32 level)
/* set backlight brightness to level in range [0..max] */
void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
297,7 → 488,10
void intel_panel_disable_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
dev_priv->backlight.enabled = false;
intel_panel_actually_set_backlight(dev, 0);
 
314,6 → 508,8
I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
}
}
 
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
 
void intel_panel_enable_backlight(struct drm_device *dev,
320,7 → 516,12
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
if (dev_priv->backlight.level == 0) {
dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
// if (dev_priv->backlight.device)
347,7 → 548,10
else
tmp &= ~BLM_PIPE_SELECT;
 
tmp |= BLM_PIPE(pipe);
if (cpu_transcoder == TRANSCODER_EDP)
tmp |= BLM_TRANSCODER_EDP;
else
tmp |= BLM_PIPE(cpu_transcoder);
tmp &= ~BLM_PWM_ENABLE;
 
I915_WRITE(reg, tmp);
354,7 → 558,8
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
 
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_SPLIT(dev) &&
!(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
369,6 → 574,8
*/
dev_priv->backlight.enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
 
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
}
 
static void intel_panel_init_backlight(struct drm_device *dev)
405,7 → 612,8
static int intel_panel_update_status(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
intel_panel_set_backlight(dev, bd->props.brightness);
intel_panel_set_backlight(dev, bd->props.brightness,
bd->props.max_brightness);
return 0;
}
 
425,6 → 633,7
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct backlight_properties props;
unsigned long flags;
 
intel_panel_init_backlight(dev);
 
434,7 → 643,11
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.brightness = dev_priv->backlight.level;
props.max_brightness = _intel_panel_get_max_backlight(dev);
 
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
props.max_brightness = intel_panel_get_max_backlight(dev);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 
if (props.max_brightness == 0) {
DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
return -ENODEV;
/drivers/video/drm/i915/intel_pm.c
37,16 → 37,7
#define assert_spin_locked(x)
 
void getrawmonotonic(struct timespec *ts);
void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
 
static inline struct timespec timespec_sub(struct timespec lhs,
struct timespec rhs)
{
struct timespec ts_delta;
set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
lhs.tv_nsec - rhs.tv_nsec);
return ts_delta;
}
 
 
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
102,7 → 93,7
int plane, i;
u32 fbc_ctl, fbc_ctl2;
 
cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
 
129,8 → 120,8
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
cfb_pitch, crtc->y, intel_crtc->plane);
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
 
static bool i8xx_fbc_enabled(struct drm_device *dev)
164,7 → 155,7
/* enable it... */
I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
 
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
 
static void g4x_disable_fbc(struct drm_device *dev)
233,7 → 224,7
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
244,7 → 235,7
sandybridge_blit_fbc_update(dev);
}
 
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
 
static void ironlake_disable_fbc(struct drm_device *dev)
258,6 → 249,18
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
if (IS_IVYBRIDGE(dev))
/* WaFbcDisableDpfcClockGating:ivb */
I915_WRITE(ILK_DSPCLK_GATE_D,
I915_READ(ILK_DSPCLK_GATE_D) &
~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
 
if (IS_HASWELL(dev))
/* WaFbcDisableDpfcClockGating:hsw */
I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
~HSW_DPFC_GATING_DISABLE);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
269,6 → 272,47
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
 
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
 
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
/* WaFbcDisableDpfcClockGating:ivb */
I915_WRITE(ILK_DSPCLK_GATE_D,
I915_READ(ILK_DSPCLK_GATE_D) |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw */
I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
HSW_BYPASS_FBC_QUEUE);
/* WaFbcDisableDpfcClockGating:hsw */
I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
HSW_DPFC_GATING_DISABLE);
}
 
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
 
sandybridge_blit_fbc_update(dev);
 
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
 
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
288,7 → 332,7
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev->struct_mutex);
if (work == dev_priv->fbc_work) {
if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
296,12 → 340,12
dev_priv->display.enable_fbc(work->crtc,
work->interval);
 
dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
dev_priv->cfb_fb = work->crtc->fb->base.id;
dev_priv->cfb_y = work->crtc->y;
dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
dev_priv->fbc.fb_id = work->crtc->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
}
 
dev_priv->fbc_work = NULL;
dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
 
310,13 → 354,13
 
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc_work == NULL)
if (dev_priv->fbc.fbc_work == NULL)
return;
 
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 
/* Synchronisation is provided by struct_mutex and checking of
* dev_priv->fbc_work, so we can perform the cancellation
* dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
// if (cancel_delayed_work(&dev_priv->fbc_work->work))
328,10 → 372,10
* for our mutex), it will discover that it is no longer
* necessary to run.
*/
dev_priv->fbc_work = NULL;
dev_priv->fbc.fbc_work = NULL;
}
 
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
344,6 → 388,7
 
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc, interval);
return;
}
353,7 → 398,7
work->interval = interval;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
dev_priv->fbc_work = work;
dev_priv->fbc.fbc_work = work;
 
DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
 
367,6 → 412,8
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
381,9 → 428,19
return;
 
dev_priv->display.disable_fbc(dev);
dev_priv->cfb_plane = -1;
dev_priv->fbc.plane = -1;
}
 
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return false;
 
dev_priv->fbc.no_fbc_reason = reason;
return true;
}
 
/**
* intel_update_fbc - enable/disable FBC as needed
* @dev: the drm_device
394,7 → 451,7
* - no pixel multiply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= 2048 in width, 1536 in height
* - framebuffer <= max_hdisplay in width, max_vdisplay in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
411,15 → 468,18
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int enable_fbc;
unsigned int max_hdisplay, max_vdisplay;
 
ENTER();
 
if (!i915_powersave)
if (!I915_HAS_FBC(dev)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
return;
}
 
if (!I915_HAS_FBC(dev))
if (!i915_powersave) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
return;
}
 
/*
* If FBC is already on, we just have to verify that we can
434,8 → 494,8
if (intel_crtc_active(tmp_crtc) &&
!to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
goto out_disable;
}
crtc = tmp_crtc;
443,8 → 503,8
}
 
if (!crtc || crtc->fb == NULL) {
if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
DRM_DEBUG_KMS("no output, disabling\n");
dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
goto out_disable;
}
 
453,34 → 513,42
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
enable_fbc = i915_enable_fbc;
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
if (INTEL_INFO(dev)->gen <= 6)
enable_fbc = 0;
if (i915_enable_fbc < 0 &&
INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
goto out_disable;
}
if (!enable_fbc) {
if (!i915_enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
if ((crtc->mode.hdisplay > 2048) ||
(crtc->mode.vdisplay > 1536)) {
 
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_hdisplay = 4096;
max_vdisplay = 2048;
} else {
max_hdisplay = 2048;
max_vdisplay = 1536;
}
if ((crtc->mode.hdisplay > max_hdisplay) ||
(crtc->mode.vdisplay > max_vdisplay)) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
intel_crtc->plane != 0) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
 
489,8 → 557,8
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
 
499,10 → 567,8
goto out_disable;
 
if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
 
511,9 → 577,9
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_fb == fb->base.id &&
dev_priv->cfb_y == crtc->y)
if (dev_priv->fbc.plane == intel_crtc->plane &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
return;
 
if (intel_fbc_enabled(dev)) {
545,8 → 611,7
}
 
intel_enable_fbc(crtc, 500);
LEAVE();
 
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
 
out_disable:
556,7 → 621,6
intel_disable_fbc(dev);
}
i915_gem_stolen_cleanup_compression(dev);
LEAVE();
}
 
static void i915_pineview_get_mem_freq(struct drm_device *dev)
1626,9 → 1690,6
I915_WRITE(FW_BLC, fwater_lo);
}
 
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
 
/*
* Check the wm result.
*
1654,6 → 1715,10
I915_WRITE(DISP_ARB_CTL,
I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
return false;
} else if (INTEL_INFO(dev)->gen >= 6) {
/* enable FBC WM (except on ILK, where it must remain off) */
I915_WRITE(DISP_ARB_CTL,
I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
}
 
if (display_wm > display->max_wm) {
1739,9 → 1804,9
enabled = 0;
if (g4x_compute_wm0(dev, PIPE_A,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
dev_priv->wm.pri_latency[0] * 100,
&ironlake_cursor_wm_info,
ILK_LP0_CURSOR_LATENCY,
dev_priv->wm.cur_latency[0] * 100,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1753,9 → 1818,9
 
if (g4x_compute_wm0(dev, PIPE_B,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
dev_priv->wm.pri_latency[0] * 100,
&ironlake_cursor_wm_info,
ILK_LP0_CURSOR_LATENCY,
dev_priv->wm.cur_latency[0] * 100,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1779,7 → 1844,7
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
ILK_READ_WM1_LATENCY() * 500,
dev_priv->wm.pri_latency[1] * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
1787,7 → 1852,7
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
1794,7 → 1859,7
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
ILK_READ_WM2_LATENCY() * 500,
dev_priv->wm.pri_latency[2] * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
1802,7 → 1867,7
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
1816,7 → 1881,7
static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
1871,7 → 1936,7
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
SNB_READ_WM1_LATENCY() * 500,
dev_priv->wm.pri_latency[1] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
1879,7 → 1944,7
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
1886,7 → 1951,7
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
SNB_READ_WM2_LATENCY() * 500,
dev_priv->wm.pri_latency[2] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
1894,7 → 1959,7
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
1901,7 → 1966,7
 
/* WM3 */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
dev_priv->wm.pri_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
1909,7 → 1974,7
 
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
(SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
1918,7 → 1983,7
static void ivybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
1988,7 → 2053,7
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
SNB_READ_WM1_LATENCY() * 500,
dev_priv->wm.pri_latency[1] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
1996,7 → 2061,7
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
2003,7 → 2068,7
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
SNB_READ_WM2_LATENCY() * 500,
dev_priv->wm.pri_latency[2] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
2011,7 → 2076,7
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
2018,12 → 2083,12
 
/* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
dev_priv->wm.pri_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &ignore_cursor_wm) ||
!ironlake_compute_srwm(dev, 3, enabled,
2 * SNB_READ_WM3_LATENCY() * 500,
dev_priv->wm.cur_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
2031,39 → 2096,802
 
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
(SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
}
 
static void
haswell_update_linetime_wm(struct drm_device *dev, int pipe,
struct drm_display_mode *mode)
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
 
pixel_rate = intel_crtc->config.adjusted_mode.clock;
 
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
 
if (intel_crtc->config.pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
pipe_w = intel_crtc->config.requested_mode.hdisplay;
pipe_h = intel_crtc->config.requested_mode.vdisplay;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
pipe_w = pfit_w;
if (pipe_h < pfit_h)
pipe_h = pfit_h;
 
pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
pfit_w * pfit_h);
}
 
return pixel_rate;
}
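A quick standalone sketch (not part of this diff, assumed sample numbers: 148.5 MHz dotclock, 1920x1200 pipe fitted into a 1920x1080 panel-fitter window) that only mirrors the scaling arithmetic shown in ilk_pipe_pixel_rate() above.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pixel_rate = 148500;		/* kHz (assumed)            */
	uint64_t pipe_w = 1920, pipe_h = 1200;	/* requested mode (assumed) */
	uint64_t pfit_w = 1920, pfit_h = 1080;	/* pfit window (assumed)    */

	/* same scaling as above: rate grows by the source/fitted area ratio */
	pixel_rate = pixel_rate * pipe_w * pipe_h / (pfit_w * pfit_h);

	printf("effective pixel rate: %llu kHz\n",
	       (unsigned long long)pixel_rate);	/* -> 165000 */
	return 0;
}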
 
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
uint32_t latency)
{
uint64_t ret;
 
if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;
 
ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
 
return ret;
}
 
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t latency)
{
uint32_t ret;
 
if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;
 
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
ret = DIV_ROUND_UP(ret, 64) + 2;
return ret;
}
 
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
uint8_t bytes_per_pixel)
{
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
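To see the magnitudes involved, here is a standalone sketch (assumed sample numbers, not part of the diff) of the method-1 formula above for a 148.5 MHz pixel clock, 4 bytes per pixel and a 0.7 us (7 x 0.1 us) latency.

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's DIV_ROUND_UP_ULL() */
#define DIV_ROUND_UP_ULL(x, d) (((x) + (d) - 1) / (d))

/* same formula as ilk_wm_method1(): cost in 64-byte FIFO lines */
static uint32_t wm_method1(uint32_t pixel_rate_khz, uint8_t bytes_per_pixel,
			   uint32_t latency_01us)
{
	uint64_t ret = (uint64_t)pixel_rate_khz * bytes_per_pixel * latency_01us;
	return (uint32_t)(DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2);
}

int main(void)
{
	/* 148500 * 4 * 7 = 4158000; ceil(4158000 / 640000) = 7; + 2 = 9 */
	printf("WM0 primary watermark: %u lines\n", wm_method1(148500, 4, 7));
	return 0;
}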
 
struct hsw_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
uint32_t pixel_rate;
struct intel_plane_wm_parameters pri;
struct intel_plane_wm_parameters spr;
struct intel_plane_wm_parameters cur;
};
 
struct hsw_wm_maximums {
uint16_t pri;
uint16_t spr;
uint16_t cur;
uint16_t fbc;
};
 
struct hsw_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
uint32_t wm_lp_spr[3];
uint32_t wm_linetime[3];
bool enable_fbc_wm;
};
 
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
bool sprites_enabled;
bool sprites_scaled;
bool fbc_wm_enabled;
};
 
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
uint32_t mem_value,
bool is_lp)
{
uint32_t method1, method2;
 
if (!params->active || !params->pri.enabled)
return 0;
 
method1 = ilk_wm_method1(params->pixel_rate,
params->pri.bytes_per_pixel,
mem_value);
 
if (!is_lp)
return method1;
 
method2 = ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
params->pri.horiz_pixels,
params->pri.bytes_per_pixel,
mem_value);
 
return min(method1, method2);
}
 
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
uint32_t method1, method2;
 
if (!params->active || !params->spr.enabled)
return 0;
 
method1 = ilk_wm_method1(params->pixel_rate,
params->spr.bytes_per_pixel,
mem_value);
method2 = ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
params->spr.horiz_pixels,
params->spr.bytes_per_pixel,
mem_value);
return min(method1, method2);
}
 
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
if (!params->active || !params->cur.enabled)
return 0;
 
return ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
params->cur.horiz_pixels,
params->cur.bytes_per_pixel,
mem_value);
}
 
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
uint32_t pri_val)
{
if (!params->active || !params->pri.enabled)
return 0;
 
return ilk_wm_fbc(pri_val,
params->pri.horiz_pixels,
params->pri.bytes_per_pixel);
}
 
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen >= 7)
return 768;
else
return 512;
}
 
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
unsigned int fifo_size = ilk_display_fifo_size(dev);
unsigned int max;
 
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
return 0;
 
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
fifo_size /= INTEL_INFO(dev)->num_pipes;
 
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
if (INTEL_INFO(dev)->gen <= 6)
fifo_size /= 2;
}
 
if (config->sprites_enabled) {
/* level 0 is always calculated with 1:1 split */
if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
if (is_sprite)
fifo_size *= 5;
fifo_size /= 6;
} else {
fifo_size /= 2;
}
}
 
/* clamp to max that the registers can hold */
if (INTEL_INFO(dev)->gen >= 7)
/* IVB/HSW primary/sprite plane watermarks */
max = level == 0 ? 127 : 1023;
else if (!is_sprite)
/* ILK/SNB primary plane watermarks */
max = level == 0 ? 127 : 511;
else
/* ILK/SNB sprite plane watermarks */
max = level == 0 ? 63 : 255;
 
return min(fifo_size, max);
}
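A standalone sketch (assumptions: single active pipe on a gen7+ part with its 768-entry FIFO, sprites enabled, LP1+ level) of how the 1:1 and 5/6 partitionings above divide the FIFO between the primary and sprite planes.

#include <stdio.h>

int main(void)
{
	unsigned int fifo = 768;	/* gen7+ display FIFO, single pipe, LP1+ */

	/* INTEL_DDB_PART_1_2: primary and sprite share the FIFO equally */
	unsigned int pri_1_2 = fifo / 2;	/* 384 */
	unsigned int spr_1_2 = fifo / 2;	/* 384 */

	/* INTEL_DDB_PART_5_6: sprite gets 5/6, primary the remaining 1/6 */
	unsigned int pri_5_6 = fifo / 6;	/* 128 */
	unsigned int spr_5_6 = fifo * 5 / 6;	/* 640 */

	printf("1:1 split pri/spr = %u/%u\n", pri_1_2, spr_1_2);
	printf("5/6 split pri/spr = %u/%u\n", pri_5_6, spr_5_6);
	return 0;
}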
 
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
int level,
const struct intel_wm_config *config)
{
/* HSW LP1+ watermarks w/ multiple pipes */
if (level > 0 && config->num_pipes_active > 1)
return 64;
 
/* otherwise just report max that registers can hold */
if (INTEL_INFO(dev)->gen >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
 
/* Calculate the maximum FBC watermark */
static unsigned int ilk_fbc_wm_max(void)
{
/* max that registers can hold */
return 15;
}
 
static void ilk_wm_max(struct drm_device *dev,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct hsw_wm_maximums *max)
{
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
max->fbc = ilk_fbc_wm_max();
}
 
static bool ilk_check_wm(int level,
const struct hsw_wm_maximums *max,
struct intel_wm_level *result)
{
bool ret;
 
/* already determined to be invalid? */
if (!result->enable)
return false;
 
result->enable = result->pri_val <= max->pri &&
result->spr_val <= max->spr &&
result->cur_val <= max->cur;
 
ret = result->enable;
 
/*
* HACK until we can pre-compute everything,
* and thus fail gracefully if LP0 watermarks
* are exceeded...
*/
if (level == 0 && !result->enable) {
if (result->pri_val > max->pri)
DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
level, result->pri_val, max->pri);
if (result->spr_val > max->spr)
DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
level, result->spr_val, max->spr);
if (result->cur_val > max->cur)
DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
 
result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
result->enable = true;
}
 
DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
 
return ret;
}
 
static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
int level,
struct hsw_pipe_wm_parameters *p,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
uint16_t spr_latency = dev_priv->wm.spr_latency[level];
uint16_t cur_latency = dev_priv->wm.cur_latency[level];
 
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
pri_latency *= 5;
spr_latency *= 5;
cur_latency *= 5;
}
 
result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
result->spr_val = ilk_compute_spr_wm(p, spr_latency);
result->cur_val = ilk_compute_cur_wm(p, cur_latency);
result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
result->enable = true;
}
 
static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
int level, struct hsw_wm_maximums *max,
struct hsw_pipe_wm_parameters *params,
struct intel_wm_level *result)
{
enum pipe pipe;
struct intel_wm_level res[3];
 
for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
 
result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
result->enable = true;
 
return ilk_check_wm(level, max, result);
}
 
static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe,
struct hsw_pipe_wm_parameters *params)
{
uint32_t pri_val, cur_val, spr_val;
/* WM0 latency values stored in 0.1us units */
uint16_t pri_latency = dev_priv->wm.pri_latency[0];
uint16_t spr_latency = dev_priv->wm.spr_latency[0];
uint16_t cur_latency = dev_priv->wm.cur_latency[0];
 
pri_val = ilk_compute_pri_wm(params, pri_latency, false);
spr_val = ilk_compute_spr_wm(params, spr_latency);
cur_val = ilk_compute_cur_wm(params, cur_latency);
 
WARN(pri_val > 127,
"Primary WM error, mode not supported for pipe %c\n",
pipe_name(pipe));
WARN(spr_val > 127,
"Sprite WM error, mode not supported for pipe %c\n",
pipe_name(pipe));
WARN(cur_val > 63,
"Cursor WM error, mode not supported for pipe %c\n",
pipe_name(pipe));
 
return (pri_val << WM0_PIPE_PLANE_SHIFT) |
(spr_val << WM0_PIPE_SPRITE_SHIFT) |
cur_val;
}
 
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
u32 linetime, ips_linetime;
 
temp = I915_READ(PIPE_WM_LINETIME(pipe));
temp &= ~PIPE_WM_LINETIME_MASK;
if (!intel_crtc_active(crtc))
return 0;
 
/* The WM are computed based on how long it takes to fill a single
* row at the given clock rate, multiplied by 8.
* */
temp |= PIPE_WM_LINETIME_TIME(
((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
intel_ddi_get_cdclk_freq(dev_priv));
 
/* IPS watermarks are only used by pipe A, and are ignored by
* pipes B and C. They are calculated similarly to the common
* linetime values, except that we are using CD clock frequency
* in MHz instead of pixel rate for the division.
*
* This is a placeholder for the IPS watermark calculation code.
*/
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
PIPE_WM_LINETIME_TIME(linetime);
}
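The packed line-time is simply the time to scan one line, expressed in 1/8 us units; a standalone sketch (assumed 2200-pixel htotal and 148.5 MHz pixel clock) mirroring the expression above.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int htotal = 2200;	/* assumed total horizontal pixels */
	unsigned int clock = 148500;	/* assumed pixel clock in kHz      */

	/* 2200 * 1000 * 8 / 148500 rounds to 119, i.e. 119/8 = 14.875 us */
	unsigned int linetime = DIV_ROUND_CLOSEST(htotal * 1000 * 8, clock);

	printf("linetime field = %u (%u.%03u us)\n",
	       linetime, linetime / 8, (linetime % 8) * 125);
	return 0;
}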
 
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_HASWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
wm[0] = (sskpd >> 56) & 0xFF;
if (wm[0] == 0)
wm[0] = sskpd & 0xF;
wm[1] = (sskpd >> 4) & 0xFF;
wm[2] = (sskpd >> 12) & 0xFF;
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
} else if (INTEL_INFO(dev)->gen >= 6) {
uint32_t sskpd = I915_READ(MCH_SSKPD);
 
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
} else if (INTEL_INFO(dev)->gen >= 5) {
uint32_t mltr = I915_READ(MLTR_ILK);
 
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
}
}
 
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
if (INTEL_INFO(dev)->gen == 5)
wm[0] = 13;
}
 
static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
if (INTEL_INFO(dev)->gen == 5)
wm[0] = 13;
 
/* WaDoubleCursorLP3Latency:ivb */
if (IS_IVYBRIDGE(dev))
wm[3] *= 2;
}
 
static void intel_print_wm_latency(struct drm_device *dev,
const char *name,
const uint16_t wm[5])
{
int level, max_level;
 
/* how many WM levels are we expecting */
if (IS_HASWELL(dev))
max_level = 4;
else if (INTEL_INFO(dev)->gen >= 6)
max_level = 3;
else
max_level = 2;
 
for (level = 0; level <= max_level; level++) {
unsigned int latency = wm[level];
 
if (latency == 0) {
DRM_ERROR("%s WM%d latency not provided\n",
name, level);
continue;
}
 
/* WM1+ latency values in 0.5us units */
if (level > 0)
latency *= 5;
 
DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
name, level, wm[level],
latency / 10, latency % 10);
}
}
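For reference, a standalone sketch of the unit conversion the loop above performs: WM0 latencies are already in 0.1 us units, while WM1+ values are stored in 0.5 us units, so an assumed raw WM1 value of 4 prints as "4 (2.0 usec)".

#include <stdio.h>

int main(void)
{
	unsigned int level = 1;
	unsigned int raw = 4;		/* assumed WM1 register field value */
	unsigned int latency = raw;

	if (level > 0)			/* WM1+ stored in 0.5 us units */
		latency *= 5;		/* -> tenths of a microsecond  */

	printf("WM%u latency %u (%u.%u usec)\n",
	       level, raw, latency / 10, latency % 10);
	return 0;
}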
 
static void intel_setup_wm_latency(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
 
memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
 
intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
 
intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
 
static void hsw_compute_wm_parameters(struct drm_device *dev,
struct hsw_pipe_wm_parameters *params,
struct hsw_wm_maximums *lp_max_1_2,
struct hsw_wm_maximums *lp_max_5_6)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
enum pipe pipe;
struct intel_wm_config config = {};
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct hsw_pipe_wm_parameters *p;
 
pipe = intel_crtc->pipe;
p = &params[pipe];
 
p->active = intel_crtc_active(crtc);
if (!p->active)
continue;
 
config.num_pipes_active++;
 
p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
p->pri.horiz_pixels =
intel_crtc->config.requested_mode.hdisplay;
p->cur.horiz_pixels = 64;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
p->cur.enabled = true;
}
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
struct hsw_pipe_wm_parameters *p;
 
pipe = intel_plane->pipe;
p = &params[pipe];
 
p->spr = intel_plane->wm;
 
config.sprites_enabled |= p->spr.enabled;
config.sprites_scaled |= p->spr.scaled;
}
 
ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
 
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
else
*lp_max_5_6 = *lp_max_1_2;
}
 
static void hsw_compute_wm_results(struct drm_device *dev,
struct hsw_pipe_wm_parameters *params,
struct hsw_wm_maximums *lp_maximums,
struct hsw_wm_values *results)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_wm_level lp_results[4] = {};
enum pipe pipe;
int level, max_level, wm_lp;
 
for (level = 1; level <= 4; level++)
if (!hsw_compute_lp_wm(dev_priv, level,
lp_maximums, params,
&lp_results[level - 1]))
break;
max_level = level - 1;
 
memset(results, 0, sizeof(*results));
 
/* The spec says it is preferred to disable FBC WMs instead of disabling
* a WM level. */
results->enable_fbc_wm = true;
for (level = 1; level <= max_level; level++) {
if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
results->enable_fbc_wm = false;
lp_results[level - 1].fbc_val = 0;
}
}
 
for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
const struct intel_wm_level *r;
 
level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
if (level > max_level)
break;
 
r = &lp_results[level - 1];
results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
r->fbc_val,
r->pri_val,
r->cur_val);
results->wm_lp_spr[wm_lp - 1] = r->spr_val;
}
 
for_each_pipe(pipe)
results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
&params[pipe]);
 
for_each_pipe(pipe) {
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
}
}
 
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
struct hsw_wm_values *r2)
{
int i, val_r1 = 0, val_r2 = 0;
 
for (i = 0; i < 3; i++) {
if (r1->wm_lp[i] & WM3_LP_EN)
val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
if (r2->wm_lp[i] & WM3_LP_EN)
val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
}
 
if (val_r1 == val_r2) {
if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
return r2;
else
return r1;
} else if (val_r1 > val_r2) {
return r1;
} else {
return r2;
}
}
 
/*
* The spec says we shouldn't write when we don't need to, because every write
* causes WMs to be re-evaluated, expending some power.
*/
static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
struct hsw_wm_values *results,
enum intel_ddb_partitioning partitioning)
{
struct hsw_wm_values previous;
uint32_t val;
enum intel_ddb_partitioning prev_partitioning;
bool prev_enable_fbc_wm;
 
I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
 
prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
 
if (memcmp(results->wm_pipe, previous.wm_pipe,
sizeof(results->wm_pipe)) == 0 &&
memcmp(results->wm_lp, previous.wm_lp,
sizeof(results->wm_lp)) == 0 &&
memcmp(results->wm_lp_spr, previous.wm_lp_spr,
sizeof(results->wm_lp_spr)) == 0 &&
memcmp(results->wm_linetime, previous.wm_linetime,
sizeof(results->wm_linetime)) == 0 &&
partitioning == prev_partitioning &&
results->enable_fbc_wm == prev_enable_fbc_wm)
return;
 
if (previous.wm_lp[2] != 0)
I915_WRITE(WM3_LP_ILK, 0);
if (previous.wm_lp[1] != 0)
I915_WRITE(WM2_LP_ILK, 0);
if (previous.wm_lp[0] != 0)
I915_WRITE(WM1_LP_ILK, 0);
 
if (previous.wm_pipe[0] != results->wm_pipe[0])
I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
if (previous.wm_pipe[1] != results->wm_pipe[1])
I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
if (previous.wm_pipe[2] != results->wm_pipe[2])
I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
 
if (previous.wm_linetime[0] != results->wm_linetime[0])
I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
if (previous.wm_linetime[1] != results->wm_linetime[1])
I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
if (previous.wm_linetime[2] != results->wm_linetime[2])
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
if (prev_partitioning != partitioning) {
val = I915_READ(WM_MISC);
if (partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
I915_WRITE(WM_MISC, val);
}
 
if (prev_enable_fbc_wm != results->enable_fbc_wm) {
val = I915_READ(DISP_ARB_CTL);
if (results->enable_fbc_wm)
val &= ~DISP_FBC_WM_DIS;
else
val |= DISP_FBC_WM_DIS;
I915_WRITE(DISP_ARB_CTL, val);
}
 
if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
 
if (results->wm_lp[0] != 0)
I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
if (results->wm_lp[1] != 0)
I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
if (results->wm_lp[2] != 0)
I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
}
 
static void haswell_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
struct hsw_pipe_wm_parameters params[3];
struct hsw_wm_values results_1_2, results_5_6, *best_results;
enum intel_ddb_partitioning partitioning;
 
hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
 
hsw_compute_wm_results(dev, params,
&lp_max_1_2, &results_1_2);
if (lp_max_1_2.pri != lp_max_5_6.pri) {
hsw_compute_wm_results(dev, params,
&lp_max_5_6, &results_5_6);
best_results = hsw_find_best_result(&results_1_2, &results_5_6);
} else {
best_results = &results_1_2;
}
 
partitioning = (best_results == &results_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
hsw_write_wm_values(dev_priv, best_results, partitioning);
}
 
static void haswell_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
 
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.bytes_per_pixel = pixel_size;
 
haswell_update_wm(plane->dev);
}
 
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size,
2140,15 → 2968,22
return *sprite_wm > 0x3ff ? false : true;
}
 
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size)
static void sandybridge_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
int pipe = to_intel_plane(plane)->pipe;
int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
u32 val;
int sprite_wm, reg;
int ret;
 
if (!enabled)
return;
 
switch (pipe) {
case 0:
reg = WM0_PIPEA_ILK;
2167,8 → 3002,8
&sandybridge_display_wm_info,
latency, &sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
pipe);
DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
pipe_name(pipe));
return;
}
 
2175,17 → 3010,17
val = I915_READ(reg);
val &= ~WM0_PIPE_SPRITE_MASK;
I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
 
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM1_LATENCY() * 500,
dev_priv->wm.spr_latency[1] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
pipe);
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
pipe_name(pipe));
return;
}
I915_WRITE(WM1S_LP_ILK, sprite_wm);
2197,11 → 3032,11
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM2_LATENCY() * 500,
dev_priv->wm.spr_latency[2] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
pipe);
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
pipe_name(pipe));
return;
}
I915_WRITE(WM2S_LP_IVB, sprite_wm);
2209,11 → 3044,11
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM3_LATENCY() * 500,
dev_priv->wm.spr_latency[3] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
pipe);
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
pipe_name(pipe));
return;
}
I915_WRITE(WM3S_LP_IVB, sprite_wm);
2259,23 → 3094,16
dev_priv->display.update_wm(dev);
}
 
void intel_update_linetime_watermarks(struct drm_device *dev,
int pipe, struct drm_display_mode *mode)
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = plane->dev->dev_private;
 
if (dev_priv->display.update_linetime_wm)
dev_priv->display.update_linetime_wm(dev, pipe, mode);
}
 
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.update_sprite_wm)
dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
pixel_size);
dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
pixel_size, enabled, scaled);
}
 
static struct drm_i915_gem_object *
2292,7 → 3120,7
return NULL;
}
 
ret = i915_gem_object_pin(ctx, 4096, true, false);
ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
2502,28 → 3330,107
trace_intel_gpu_freq_change(val * 50);
}
 
static void gen6_disable_rps(struct drm_device *dev)
/*
* Wait until the previous freq change has completed,
* or the timeout has elapsed, and then update our notion
* of the current GPU frequency.
*/
static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
{
u32 pval;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
 
pval >>= 8;
 
if (pval != dev_priv->rps.cur_delay)
DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay,
vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
 
dev_priv->rps.cur_delay = pval;
}
 
void valleyview_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
gen6_rps_limits(dev_priv, &val);
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
 
vlv_update_rps_cur_delay(dev_priv);
 
DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay,
vlv_gpu_freq(dev_priv->mem_freq, val), val);
 
if (val == dev_priv->rps.cur_delay)
return;
 
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 
dev_priv->rps.cur_delay = val;
 
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
}
 
static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0);
I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
spin_lock_irq(&dev_priv->rps.lock);
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->rps.lock);
spin_unlock_irq(&dev_priv->irq_lock);
 
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
}
 
static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 
gen6_disable_rps_interrupts(dev);
}
 
static void valleyview_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_RC_CONTROL, 0);
 
gen6_disable_rps_interrupts(dev);
 
if (dev_priv->vlv_pctx) {
drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
dev_priv->vlv_pctx = NULL;
}
}
 
int intel_enable_rc6(const struct drm_device *dev)
{
/* No RC6 before Ironlake */
if (INTEL_INFO(dev)->gen < 5)
return 0;
 
/* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
2547,6 → 3454,29
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
 
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enabled_intrs;
 
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
spin_unlock_irq(&dev_priv->irq_lock);
 
/* only unmask PM interrupts we need. Mask all others. */
enabled_intrs = GEN6_PM_RPS_EVENTS;
 
/* IVB and SNB hard hang on a looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*/
if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
 
I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
}
 
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2598,6 → 3528,9
 
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2675,14 → 3608,7
 
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
spin_lock_irq(&dev_priv->rps.lock);
WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps.lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
gen6_enable_rps_interrupts(dev);
 
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
2701,7 → 3627,7
gen6_gt_force_wake_put(dev_priv);
}
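On SNB/IVB the RPS delay values used above are in 50 MHz units (cf. the val * 50 in the trace call earlier in this file); a minimal standalone sketch of decoding an assumed GT_PERF_STATUS reading into a frequency.

#include <stdio.h>

int main(void)
{
	unsigned int gt_perf_status = 0x1200;	/* assumed sample reading */
	unsigned int delay = (gt_perf_status & 0xff00) >> 8;	/* -> 18  */

	printf("current RPS delay %u -> %u MHz\n", delay, delay * 50); /* 900 */
	return 0;
}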
 
static void gen6_update_ring_freq(struct drm_device *dev)
void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
2763,6 → 3689,198
}
}
 
int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
 
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
 
rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
/* Clamp to max */
rp0 = min_t(u32, rp0, 0xea);
 
return rp0;
}
 
static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
u32 val, rpe;
 
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
 
return rpe;
}
 
int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
 
static void vlv_rps_timer_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.vlv_work.work);
 
/*
* Timer fired, we must be idle. Drop to min voltage state.
* Note: we use RPe here since it should match the
* Vmin we were shooting for. That should give us better
* perf when we come back out of RC6 than if we used the
* min freq available.
*/
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *pctx;
unsigned long pctx_paddr;
u32 pcbr;
int pctx_size = 24*1024;
 
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
int pcbr_offset;
 
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
pcbr_offset,
I915_GTT_OFFSET_NONE,
pctx_size);
goto out;
}
 
/*
* From the Gunit register HAS:
* The Gfx driver is expected to program this register and ensure
* proper allocation within Gfx stolen memory. For example, this
* register should be programmed such that the PCBR range does not
* overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
*/
pctx = i915_gem_object_create_stolen(dev, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
return;
}
 
pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
I915_WRITE(VLV_PCBR, pctx_paddr);
 
out:
dev_priv->vlv_pctx = pctx;
}
 
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
u32 gtfifodbg, val;
int i;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
 
valleyview_setup_pctx(dev);
 
gen6_gt_force_wake_get(dev_priv);
 
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
 
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
 
/* allows RC6 residency counter to work */
I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
I915_WRITE(GEN6_RC_CONTROL,
GEN7_RC_CTL_TO_MODE);
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
switch ((val >> 6) & 3) {
case 0:
case 1:
dev_priv->mem_freq = 800;
break;
case 2:
dev_priv->mem_freq = 1066;
break;
case 3:
dev_priv->mem_freq = 1333;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
dev_priv->rps.cur_delay = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay);
 
dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.hw_max = dev_priv->rps.max_delay;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.max_delay),
dev_priv->rps.max_delay);
 
dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
 
dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.min_delay),
dev_priv->rps.min_delay);
 
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
 
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
gen6_enable_rps_interrupts(dev);
 
gen6_gt_force_wake_put(dev_priv);
}
 
void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2852,7 → 3970,7
 
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
2875,7 → 3993,7
return;
}
 
I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
 
3486,6 → 4604,9
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Interrupts should be disabled already to avoid re-arming. */
WARN_ON(dev->irq_enabled);
 
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
3492,6 → 4613,9
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
// cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
}
3507,8 → 4631,13
ENTER();
 
mutex_lock(&dev_priv->rps.hw_lock);
 
if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else {
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
LEAVE();
3522,7 → 4651,7
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
3545,12 → 4674,28
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
 
static void g4x_disable_trickle_feed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
}
 
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
/* Required for FBC */
/*
* Required for FBC
* WaFbcDisableDpfcClockGating:ilk
*/
dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
3587,6 → 4732,7
* The bit 7,8,9 of 0x42020.
*/
if (IS_IRONLAKE_M(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ilk */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
3604,10 → 4750,12
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
 
/* WaDisableRenderCachePipelinedFlush */
/* WaDisableRenderCachePipelinedFlush:ilk */
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
g4x_disable_trickle_feed(dev);
 
ibx_init_clock_gating(dev);
}
 
3632,7 → 4780,7
val = I915_READ(TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->fdi_rx_polarity_inverted)
if (dev_priv->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
3662,7 → 4810,6
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
3671,11 → 4818,11
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
 
/* WaDisableHiZPlanesWhenMSAAEnabled */
/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
I915_WRITE(_3D_CHICKEN,
_MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
 
/* WaSetupGtModeTdRowDispatch */
/* WaSetupGtModeTdRowDispatch:snb */
if (IS_SNB_GT1(dev))
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
3702,8 → 4849,8
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
* Also apply WaDisableVDSUnitClockGating and
* WaDisableRCPBUnitClockGating.
* Also apply WaDisableVDSUnitClockGating:snb and
* WaDisableRCPBUnitClockGating:snb.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
3722,6 → 4869,8
* The bit5 and bit7 of 0x42020
* The bit14 of 0x70180
* The bit14 of 0x71180
*
* WaFbcAsynchFlipDisableFbcQueue:snb
*/
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
3734,17 → 4883,8
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
g4x_disable_trickle_feed(dev);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
 
/* The default value should be 0x200 according to docs, but the two
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
3764,7 → 4904,6
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
 
/* WaVSRefCountFullforceMissDisable */
if (IS_HASWELL(dev_priv->dev))
reg &= ~GEN7_FF_VS_REF_CNT_FFME;
 
3783,12 → 4922,28
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
 
/* WADPOClockGatingDisable:hsw */
I915_WRITE(_TRANSA_CHICKEN1,
I915_READ(_TRANSA_CHICKEN1) |
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
 
static void lpt_suspend_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
 
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
}
 
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
3795,53 → 4950,43
I915_WRITE(WM1_LP_ILK, 0);
 
/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
* This implements the WaDisableRCZUnitClockGating:hsw workaround.
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
/* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
/* WaApplyL3ControlAndL3ChickenMode:hsw */
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
 
/* This is required by WaCatErrorRejectionIssue */
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
I915_WRITE(HSW_ROW_CHICKEN3,
_MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
 
/* This is required by WaCatErrorRejectionIssue:hsw */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
 
/* WaVSRefCountFullforceMissDisable:hsw */
gen7_setup_fixed_func_scheduler(dev_priv);
 
/* WaDisable4x2SubspanOptimization */
/* WaDisable4x2SubspanOptimization:hsw */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
/* WaSwitchSolVfFArbitrationPriority */
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
I915_WRITE(WM_DBG,
I915_READ(WM_DBG) |
WM_DBG_DISALLOW_MULTIPLE_LP |
WM_DBG_DISALLOW_SPRITE |
WM_DBG_DISALLOW_MAXFIFO);
/* WaRsPkgCStateDisplayPMReq:hsw */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
 
lpt_init_clock_gating(dev);
}
3849,7 → 4994,6
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t snpcr;
 
I915_WRITE(WM3_LP_ILK, 0);
3858,16 → 5002,16
 
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
/* WaDisableEarlyCull */
/* WaDisableEarlyCull:ivb */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
/* WaDisableBackToBackFlipFix */
/* WaDisableBackToBackFlipFix:ivb */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
/* WaDisablePSDDualDispatchEnable */
/* WaDisablePSDDualDispatchEnable:ivb */
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3875,11 → 5019,11
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
/* WaApplyL3ControlAndL3ChickenMode:ivb */
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3892,7 → 5036,7
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
 
/* WaForceL3Serialization */
/* WaForceL3Serialization:ivb */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
3907,31 → 5051,23
* but we didn't debug actual testcases to find it out.
*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
/* This is required by WaCatErrorRejectionIssue */
/* This is required by WaCatErrorRejectionIssue:ivb */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
g4x_disable_trickle_feed(dev);
 
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
/* WaVSRefCountFullforceMissDisable:ivb */
gen7_setup_fixed_func_scheduler(dev_priv);
 
/* WaDisable4x2SubspanOptimization */
/* WaDisable4x2SubspanOptimization:ivb */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
3949,58 → 5085,44
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
/* WaDisableEarlyCull */
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
/* WaDisableBackToBackFlipFix */
/* WaDisableBackToBackFlipFix:vlv */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
/* WaDisablePSDDualDispatchEnable */
/* WaDisablePSDDualDispatchEnable:vlv */
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
/* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
/* WaApplyL3ControlAndL3ChickenMode:vlv */
I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
 
/* WaForceL3Serialization */
/* WaForceL3Serialization:vlv */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
/* WaDisableDopClockGating */
/* WaDisableDopClockGating:vlv */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
/* WaForceL3Serialization */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
/* This is required by WaCatErrorRejectionIssue */
/* This is required by WaCatErrorRejectionIssue:vlv */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
 
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
4012,10 → 5134,10
* but we didn't debug actual testcases to find it out.
*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
*
* Also apply WaDisableVDSUnitClockGating and
* WaDisableRCPBUnitClockGating.
* Also apply WaDisableVDSUnitClockGating:vlv and
* WaDisableRCPBUnitClockGating:vlv.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
4026,18 → 5148,13
 
I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
/*
* WaDisableVLVClockGating_VBIIssue
* WaDisableVLVClockGating_VBIIssue:vlv
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
4073,6 → 5190,8
/* WaDisableRenderCachePipelinedFlush */
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
g4x_disable_trickle_feed(dev);
}
 
static void crestline_init_clock_gating(struct drm_device *dev)
4084,6 → 5203,8
I915_WRITE(DSPCLK_GATE_D, 0);
I915_WRITE(RAMCLK_GATE_D, 0);
I915_WRITE16(DEUC, 0);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
 
static void broadwater_init_clock_gating(struct drm_device *dev)
4096,6 → 5217,8
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
 
static void gen3_init_clock_gating(struct drm_device *dev)
4135,56 → 5258,164
dev_priv->display.init_clock_gating(dev);
}
 
void intel_suspend_hw(struct drm_device *dev)
{
if (HAS_PCH_LPT(dev))
lpt_suspend_hw(dev);
}
 
/**
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
bool intel_using_power_well(struct drm_device *dev)
bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_HASWELL(dev))
if (!HAS_POWER_WELL(dev))
return true;
 
switch (domain) {
case POWER_DOMAIN_PIPE_A:
case POWER_DOMAIN_TRANSCODER_EDP:
return true;
case POWER_DOMAIN_PIPE_B:
case POWER_DOMAIN_PIPE_C:
case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
case POWER_DOMAIN_TRANSCODER_A:
case POWER_DOMAIN_TRANSCODER_B:
case POWER_DOMAIN_TRANSCODER_C:
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
else
return true;
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
default:
BUG();
}
}
 
void intel_set_power_well(struct drm_device *dev, bool enable)
static void __intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
uint32_t tmp;
 
if (!HAS_POWER_WELL(dev))
return;
 
if (!i915_disable_power_well && !enable)
return;
 
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE;
enable_requested = tmp & HSW_PWR_WELL_ENABLE;
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
 
if (enable) {
if (!enable_requested)
I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
I915_WRITE(HSW_PWR_WELL_DRIVER,
HSW_PWR_WELL_ENABLE_REQUEST);
 
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling power well\n");
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
HSW_PWR_WELL_STATE), 20))
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}
} else {
if (enable_requested) {
unsigned long irqflags;
enum pipe p;
 
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
 
/*
* After this, the registers on the pipes that are part
* of the power well will become zero, so we have to
* adjust our counters according to that.
*
* FIXME: Should we do this in general in
* drm_vblank_post_modeset?
*/
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for_each_pipe(p)
if (p != PIPE_A)
dev->last_vblank[p] = 0;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
}
 
static struct i915_power_well *hsw_pwr;
 
/* Display audio driver power well request */
void i915_request_power_well(void)
{
if (WARN_ON(!hsw_pwr))
return;
 
spin_lock_irq(&hsw_pwr->lock);
if (!hsw_pwr->count++ &&
!hsw_pwr->i915_request)
__intel_set_power_well(hsw_pwr->device, true);
spin_unlock_irq(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
 
/* Display audio driver power well release */
void i915_release_power_well(void)
{
if (WARN_ON(!hsw_pwr))
return;
 
spin_lock_irq(&hsw_pwr->lock);
WARN_ON(!hsw_pwr->count);
if (!--hsw_pwr->count &&
!hsw_pwr->i915_request)
__intel_set_power_well(hsw_pwr->device, false);
spin_unlock_irq(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
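/*
 * Illustrative sketch only (not part of this revision): the intended calling
 * pattern for the exported pair above, as seen from an external display audio
 * driver.  The function name is hypothetical; the pair simply brackets any
 * access to display audio registers so the Haswell power well stays up.
 */
static void example_audio_power_well_usage(void)
{
	i915_request_power_well();	/* keep the power well enabled */
	/* ... access HDMI/DP display audio registers ... */
	i915_release_power_well();	/* allow it to be powered down again */
}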
 
int i915_init_power_well(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
hsw_pwr = &dev_priv->power_well;
 
hsw_pwr->device = dev;
spin_lock_init(&hsw_pwr->lock);
hsw_pwr->count = 0;
 
return 0;
}
 
void i915_remove_power_well(struct drm_device *dev)
{
hsw_pwr = NULL;
}
 
void intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_well *power_well = &dev_priv->power_well;
 
if (!HAS_POWER_WELL(dev))
return;
 
if (!i915_disable_power_well && !enable)
return;
 
spin_lock_irq(&power_well->lock);
power_well->i915_request = enable;
 
/* only reject "disable" power well request */
if (power_well->count && !enable) {
spin_unlock_irq(&power_well->lock);
return;
}
 
__intel_set_power_well(dev, enable);
spin_unlock_irq(&power_well->lock);
}
 
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
4203,10 → 5434,21
 
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
 
/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
hsw_disable_package_c8(dev_priv);
}
 
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
hsw_enable_package_c8(dev_priv);
}
 
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
4215,7 → 5457,12
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->display.enable_fbc =
gen7_enable_fbc;
else
dev_priv->display.enable_fbc =
ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4237,8 → 5484,12
 
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
intel_setup_wm_latency(dev);
 
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
if (dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] &&
dev_priv->wm.cur_latency[1])
dev_priv->display.update_wm = ironlake_update_wm;
else {
DRM_DEBUG_KMS("Failed to get proper latency. "
4247,7 → 5498,9
}
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
if (dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] &&
dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
4257,7 → 5510,9
}
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
} else if (IS_IVYBRIDGE(dev)) {
if (SNB_READ_WM0_LATENCY()) {
if (dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] &&
dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
4267,10 → 5522,12
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
} else if (IS_HASWELL(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
if (dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] &&
dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = haswell_update_wm;
dev_priv->display.update_sprite_wm =
haswell_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
4330,222 → 5587,6
}
}
 
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
u32 gt_thread_status_mask;
 
if (IS_HASWELL(dev_priv->dev))
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
else
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
 
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
 
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}
 
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS);
}
 
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
 
if (IS_HASWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_MT_ACK;
 
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS);
 
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (dev_priv->forcewake_count++ == 0)
dev_priv->gt.force_wake_get(dev_priv);
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
 
void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
u32 gtfifodbg;
gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
"MMIO read or write has been dropped %x\n", gtfifodbg))
I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
 
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
 
/*
* see gen6_gt_force_wake_get()
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (--dev_priv->forcewake_count == 0)
dev_priv->gt.force_wake_put(dev_priv);
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
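/*
 * Illustrative sketch only: the explicit forcewake bracket described in the
 * comment above gen6_gt_force_wake_get().  The function name is hypothetical;
 * dev_priv is assumed to be a valid struct drm_i915_private pointer.
 */
static void example_forcewake_sequence(struct drm_i915_private *dev_priv)
{
	gen6_gt_force_wake_get(dev_priv);
	/* ... a sequence of I915_READ()/I915_WRITE() accesses that must not
	 *     observe a GT power-down ... */
	gen6_gt_force_wake_put(dev_priv);
}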
 
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
 
if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
dev_priv->gt_fifo_count = fifo;
}
dev_priv->gt_fifo_count--;
 
return ret;
}
 
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
POSTING_READ(FORCEWAKE_ACK_VLV);
}
 
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
 
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
 
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
 
void intel_gt_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_VALLEYVIEW(dev)) {
vlv_force_wake_reset(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
}
 
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
spin_lock_init(&dev_priv->gt_lock);
 
intel_gt_reset(dev);
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
} else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
}
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
 
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4593,55 → 5634,68
return 0;
}
 
static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
u8 addr, u32 *val)
int vlv_gpu_freq(int ddr_freq, int val)
{
u32 cmd, devfn, port, be, bar;
int mult, base;
 
bar = 0;
be = 0xf;
port = IOSF_PORT_PUNIT;
devfn = PCI_DEVFN(2, 0);
switch (ddr_freq) {
case 800:
mult = 20;
base = 120;
break;
case 1066:
mult = 22;
base = 133;
break;
case 1333:
mult = 21;
base = 125;
break;
default:
return -1;
}
 
cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
(bar << IOSF_BAR_SHIFT);
return ((val - 0xbd) * mult) + base;
}
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
int vlv_freq_opcode(int ddr_freq, int val)
{
int mult, base;
 
if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) {
DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n",
opcode == PUNIT_OPCODE_REG_READ ?
"read" : "write");
return -EAGAIN;
switch (ddr_freq) {
case 800:
mult = 20;
base = 120;
break;
case 1066:
mult = 22;
base = 133;
break;
case 1333:
mult = 21;
base = 125;
break;
default:
return -1;
}
 
I915_WRITE(VLV_IOSF_ADDR, addr);
if (opcode == PUNIT_OPCODE_REG_WRITE)
I915_WRITE(VLV_IOSF_DATA, *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
val /= mult;
val -= base / mult;
val += 0xbd;
 
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0,
500)) {
DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n",
opcode == PUNIT_OPCODE_REG_READ ? "read" : "write",
addr);
return -ETIMEDOUT;
if (val > 0xea)
val = 0xea;
 
return val;
}
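/*
 * Worked example (sketch, not driver code): round-tripping a PUnit opcode
 * through the two helpers above for an assumed 1066 MHz DDR configuration,
 * i.e. the mult = 22 / base = 133 pair from the switch statements:
 *
 *	vlv_gpu_freq(1066, 0xc0)   = ((0xc0 - 0xbd) * 22) + 133 = 199 MHz
 *	vlv_freq_opcode(1066, 199) = 199/22 - 133/22 + 0xbd     = 0xc0
 */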
 
if (opcode == PUNIT_OPCODE_REG_READ)
*val = I915_READ(VLV_IOSF_DATA);
I915_WRITE(VLV_IOSF_DATA, 0);
void intel_pm_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return 0;
}
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
 
int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
{
return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val);
INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
}
 
int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
{
return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_WRITE, addr, &val);
}
/drivers/video/drm/i915/intel_ringbuffer.c
33,16 → 33,6
#include "i915_trace.h"
#include "intel_drv.h"
 
/*
* 965+ support PIPE_CONTROL commands, which provide finer grained control
* over cache flushing.
*/
struct pipe_control {
struct drm_i915_gem_object *obj;
volatile u32 *cpu_page;
u32 gtt_offset;
};
 
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
175,8 → 165,7
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
 
 
213,8 → 202,7
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
 
/* Force SNB workarounds for PIPE_CONTROL flushes */
280,13 → 268,33
return 0;
}
 
static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
{
int ret;
 
if (!ring->fbc_dirty)
return 0;
 
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_NOOP);
/* WaFbcNukeOn3DBlt:ivb/hsw */
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, value);
intel_ring_advance(ring);
 
ring->fbc_dirty = false;
return 0;
}
 
static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
 
/*
336,6 → 344,9
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
if (flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
return 0;
}
 
355,6 → 366,17
return I915_READ(acthd_reg);
}
 
static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 addr;
 
addr = dev_priv->status_page_dmah->busaddr;
if (INTEL_INFO(ring->dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
 
static int init_ring_common(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
366,6 → 388,11
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_get(dev_priv);
 
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
else
ring_setup_phys_status_page(ring);
 
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
400,7 → 427,7
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
I915_WRITE_START(ring, obj->gtt_offset);
I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
407,7 → 434,7
 
/* If the head is still not zero, the ring is dead */
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
I915_READ_START(ring) == obj->gtt_offset &&
I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
425,6 → 452,8
ring->space = ring_space(ring);
ring->last_retired_head = -1;
 
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
out:
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_put(dev_priv);
435,69 → 464,43
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
struct pipe_control *pc;
struct drm_i915_gem_object *obj;
int ret;
 
if (ring->private)
if (ring->scratch.obj)
return 0;
 
pc = kmalloc(sizeof(*pc), GFP_KERNEL);
if (!pc)
return -ENOMEM;
 
obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL) {
ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
if (ring->scratch.obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
goto err;
}
 
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 
ret = i915_gem_object_pin(obj, 4096, true, false);
ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
if (ret)
goto err_unref;
 
pc->gtt_offset = obj->gtt_offset;
pc->cpu_page = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096, PG_SW);
if (pc->cpu_page == NULL)
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
ring->scratch.cpu_page = (void*)MapIoMem((addr_t)sg_page(ring->scratch.obj->pages->sgl),4096, PG_SW);
if (ring->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
 
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
ring->name, pc->gtt_offset);
 
pc->obj = obj;
ring->private = pc;
ring->name, ring->scratch.gtt_offset);
return 0;
 
err_unpin:
i915_gem_object_unpin(obj);
i915_gem_object_unpin(ring->scratch.obj);
err_unref:
drm_gem_object_unreference(&obj->base);
drm_gem_object_unreference(&ring->scratch.obj->base);
err:
kfree(pc);
return ret;
}
 
static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
struct pipe_control *pc = ring->private;
struct drm_i915_gem_object *obj;
 
if (!ring->private)
return;
 
obj = pc->obj;
// kunmap(obj->pages[0]);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
 
kfree(pc);
ring->private = NULL;
}
 
static int init_render_ring(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
504,8 → 507,6
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
 
ENTER();
 
if (INTEL_INFO(dev)->gen > 3)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
512,6 → 513,8
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
553,10 → 556,8
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
if (HAS_L3_GPU_CACHE(dev))
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 
LEAVE();
 
return ret;
}
 
564,19 → 565,32
{
struct drm_device *dev = ring->dev;
 
if (!ring->private)
if (ring->scratch.obj == NULL)
return;
 
cleanup_pipe_control(ring);
if (INTEL_INFO(dev)->gen >= 5) {
// kunmap(sg_page(ring->scratch.obj->pages->sgl));
i915_gem_object_unpin(ring->scratch.obj);
}
 
drm_gem_object_unreference(&ring->scratch.obj->base);
ring->scratch.obj = NULL;
}
 
static void
update_mboxes(struct intel_ring_buffer *ring,
u32 mmio_offset)
{
/* NB: In order to be able to do semaphore MBOX updates for a varying number
* of rings, it's easiest if we round up each individual update to a
* multiple of 2 (since ring updates must always be a multiple of 2)
* even though the actual update only requires 3 dwords.
*/
#define MBOX_UPDATE_DWORDS 4
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_NOOP);
}
 
/**
591,19 → 605,24
static int
gen6_add_request(struct intel_ring_buffer *ring)
{
u32 mbox1_reg;
u32 mbox2_reg;
int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *useless;
int i, ret;
 
ret = intel_ring_begin(ring, 10);
ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
MBOX_UPDATE_DWORDS) +
4);
if (ret)
return ret;
#undef MBOX_UPDATE_DWORDS
 
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = ring->signal_mbox[i];
if (mbox_reg != GEN6_NOSYNC)
update_mboxes(ring, mbox_reg);
}
 
update_mboxes(ring, mbox1_reg);
update_mboxes(ring, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
681,8 → 700,7
static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
 
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
700,7 → 718,7
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
719,7 → 737,7
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
753,15 → 771,13
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
struct pipe_control *pc = ring->private;
return pc->cpu_page[0];
return ring->scratch.cpu_page[0];
}
 
static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct pipe_control *pc = ring->private;
pc->cpu_page[0] = seqno;
ring->scratch.cpu_page[0] = seqno;
}
 
static bool
775,11 → 791,8
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
if (ring->irq_refcount++ == 0)
ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return true;
793,11 → 806,8
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
if (--ring->irq_refcount == 0)
ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
881,8 → 891,6
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 mmio = 0;
 
ENTER();
 
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
897,6 → 905,9
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
case VECS:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
906,9 → 917,19
 
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
LEAVE();
 
/* Flush the TLB for this page */
if (INTEL_INFO(dev)->gen >= 6) {
u32 reg = RING_INSTPM(ring->mmio_base);
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
ring->name);
}
}
 
static int
bsd_ring_flush(struct intel_ring_buffer *ring,
963,13 → 984,12
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
GEN6_RENDER_L3_PARITY_ERROR));
I915_WRITE_IMR(ring,
~(ring->irq_enable_mask |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
986,12 → 1006,11
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
I915_WRITE_IMR(ring,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
else
I915_WRITE_IMR(ring, ~0);
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
998,6 → 1017,44
gen6_gt_force_wake_put(dev_priv);
}
 
static bool
hsw_vebox_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return true;
}
 
static void
hsw_vebox_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
return;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 length,
1039,8 → 1096,7
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
} else {
struct drm_i915_gem_object *obj = ring->private;
u32 cs_offset = obj->gtt_offset;
u32 cs_offset = ring->scratch.gtt_offset;
 
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
1120,12 → 1176,12
 
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
ret = i915_gem_object_pin(obj, 4096, true, false);
ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
 
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW);
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
1134,7 → 1190,6
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
intel_ring_setup_status_page(ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
 
1148,10 → 1203,9
return ret;
}
 
static int init_phys_hws_pga(struct intel_ring_buffer *ring)
static int init_phys_status_page(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 addr;
 
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
1160,11 → 1214,6
return -ENOMEM;
}
 
addr = dev_priv->status_page_dmah->busaddr;
if (INTEL_INFO(ring->dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
 
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
1192,7 → 1241,7
return ret;
} else {
BUG_ON(ring->id != RCS);
ret = init_phys_hws_pga(ring);
ret = init_phys_status_page(ring);
if (ret)
return ret;
}
1210,7 → 1259,7
 
ring->obj = obj;
 
ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
 
1219,7 → 1268,7
goto err_unpin;
 
ring->virtual_start =
ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
1417,7 → 1466,7
 
/* We need to add any requests required to flush the objects and ring */
if (ring->outstanding_lazy_request) {
ret = i915_add_request(ring, NULL, NULL);
ret = i915_add_request(ring, NULL);
if (ret)
return ret;
}
1491,9 → 1540,12
if (INTEL_INFO(ring->dev)->gen >= 6) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
if (HAS_VEBOX(ring->dev))
I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
 
ring->set_seqno(ring, seqno);
ring->hangcheck.seqno = seqno;
}
 
void intel_ring_advance(struct intel_ring_buffer *ring)
1540,7 → 1592,7
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
 
static int gen6_ring_flush(struct intel_ring_buffer *ring,
static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
1612,9 → 1664,10
 
/* Blitter support (SandyBridge+) */
 
static int blt_ring_flush(struct intel_ring_buffer *ring,
static int gen6_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
uint32_t cmd;
int ret;
 
1637,6 → 1690,10
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
if (IS_GEN7(dev) && flush)
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
 
return 0;
}
 
1656,15 → 1713,18
ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
ring->signal_mbox[0] = GEN6_VRSYNC;
ring->signal_mbox[1] = GEN6_BRSYNC;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
ring->signal_mbox[RCS] = GEN6_NOSYNC;
ring->signal_mbox[VCS] = GEN6_VRSYNC;
ring->signal_mbox[BCS] = GEN6_BRSYNC;
ring->signal_mbox[VECS] = GEN6_VERSYNC;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
1672,7 → 1732,8
ring->set_seqno = pc_render_set_seqno;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
} else {
ring->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
1715,7 → 1776,7
return -ENOMEM;
}
 
ret = i915_gem_object_pin(obj, 0, true, false);
ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");
1722,7 → 1783,8
return ret;
}
 
ring->private = obj;
ring->scratch.obj = obj;
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
 
return intel_init_ring_buffer(dev, ring);
1789,7 → 1851,7
}
 
if (!I915_NEED_GFX_HWS(dev)) {
ret = init_phys_hws_pga(ring);
ret = init_phys_status_page(ring);
if (ret)
return ret;
}
1812,20 → 1874,23
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev))
ring->write_tail = gen6_bsd_ring_write_tail;
ring->flush = gen6_ring_flush;
ring->flush = gen6_bsd_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
ring->signal_mbox[0] = GEN6_RVSYNC;
ring->signal_mbox[1] = GEN6_BVSYNC;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
ring->signal_mbox[RCS] = GEN6_RVSYNC;
ring->signal_mbox[VCS] = GEN6_NOSYNC;
ring->signal_mbox[BCS] = GEN6_BVSYNC;
ring->signal_mbox[VECS] = GEN6_VEVSYNC;
} else {
ring->mmio_base = BSD_RING_BASE;
ring->flush = bsd_ring_flush;
1833,7 → 1898,7
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
} else {
1858,25 → 1923,60
 
ring->mmio_base = BLT_RING_BASE;
ring->write_tail = ring_write_tail;
ring->flush = blt_ring_flush;
ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
ring->signal_mbox[0] = GEN6_RBSYNC;
ring->signal_mbox[1] = GEN6_VBSYNC;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
ring->signal_mbox[RCS] = GEN6_RBSYNC;
ring->signal_mbox[VCS] = GEN6_VBSYNC;
ring->signal_mbox[BCS] = GEN6_NOSYNC;
ring->signal_mbox[VECS] = GEN6_VEBSYNC;
ring->init = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
}
 
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
 
ring->name = "video enhancement ring";
ring->id = VECS;
 
ring->mmio_base = VEBOX_RING_BASE;
ring->write_tail = ring_write_tail;
ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
ring->signal_mbox[RCS] = GEN6_RVESYNC;
ring->signal_mbox[VCS] = GEN6_VVESYNC;
ring->signal_mbox[BCS] = GEN6_BVESYNC;
ring->signal_mbox[VECS] = GEN6_NOSYNC;
ring->init = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
}
 
int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
/drivers/video/drm/i915/intel_ringbuffer.h
33,10 → 33,21
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
enum intel_ring_hangcheck_action {
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
HANGCHECK_KICK,
HANGCHECK_HUNG,
};
 
struct intel_ring_hangcheck {
bool deadlock;
u32 seqno;
u32 acthd;
int score;
enum intel_ring_hangcheck_action action;
};
 
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
43,8 → 54,9
RCS = 0x0,
VCS,
BCS,
VECS,
} id;
#define I915_NUM_RINGS 3
#define I915_NUM_RINGS 4
u32 mmio_base;
void __iomem *virtual_start;
struct drm_device *dev;
67,7 → 79,7
*/
u32 last_retired_head;
 
u32 irq_refcount; /* protected by dev_priv->irq_lock */
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
102,8 → 114,11
struct intel_ring_buffer *to,
u32 seqno);
 
u32 semaphore_register[3]; /*our mbox written by others */
u32 signal_mbox[2]; /* mboxes this ring signals to */
/* our mbox written by others */
u32 semaphore_register[I915_NUM_RINGS];
/* mboxes this ring signals to */
u32 signal_mbox[I915_NUM_RINGS];
 
/**
* List of objects currently involved in rendering from the
* ringbuffer.
127,6 → 142,7
*/
u32 outstanding_lazy_request;
bool gpu_caches_dirty;
bool fbc_dirty;
 
wait_queue_head_t irq_queue;
 
135,9 → 151,15
*/
bool itlb_before_ctx_switch;
struct i915_hw_context *default_context;
struct drm_i915_gem_object *last_context_obj;
struct i915_hw_context *last_context;
 
void *private;
struct intel_ring_hangcheck hangcheck;
 
struct {
struct drm_i915_gem_object *obj;
u32 gtt_offset;
volatile u32 *cpu_page;
} scratch;
};
 
static inline bool
224,6 → 246,7
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
/drivers/video/drm/i915/intel_sdvo.c
80,7 → 80,7
 
/*
* Capabilities of the SDVO device returned by
* i830_sdvo_get_capabilities()
* intel_sdvo_get_capabilities()
*/
struct intel_sdvo_caps caps;
 
202,15 → 202,14
u32 cur_dot_crawl, max_dot_crawl;
};
 
static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_sdvo, base.base);
return container_of(encoder, struct intel_sdvo, base);
}
 
static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_sdvo, base);
return to_sdvo(intel_attached_encoder(connector));
}
 
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
539,7 → 538,8
&status))
goto log_fail;
 
while (status == SDVO_CMD_STATUS_PENDING && --retry) {
while ((status == SDVO_CMD_STATUS_PENDING ||
status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
if (retry < 10)
msleep(15);
else
712,6 → 712,13
intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}
 
static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}
 
static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
726,6 → 733,13
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
 
static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
return intel_sdvo_get_timing(intel_sdvo,
SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
}
 
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
uint16_t clock,
774,6 → 788,8
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
 
memset(dtd, 0, sizeof(*dtd));
 
width = mode->hdisplay;
height = mode->vdisplay;
 
816,44 → 832,51
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
dtd->part2.reserved = 0;
}
 
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
const struct intel_sdvo_dtd *dtd)
{
mode->hdisplay = dtd->part1.h_active;
mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
mode->htotal = mode->hdisplay + dtd->part1.h_blank;
mode->htotal += (dtd->part1.h_high & 0xf) << 8;
struct drm_display_mode mode = {};
 
mode->vdisplay = dtd->part1.v_active;
mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
mode->vsync_start = mode->vdisplay;
mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
mode->vsync_end = mode->vsync_start +
mode.hdisplay = dtd->part1.h_active;
mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
mode.htotal = mode.hdisplay + dtd->part1.h_blank;
mode.htotal += (dtd->part1.h_high & 0xf) << 8;
 
mode.vdisplay = dtd->part1.v_active;
mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
mode.vsync_start = mode.vdisplay;
mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
mode.vsync_end = mode.vsync_start +
(dtd->part2.v_sync_off_width & 0xf);
mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
 
mode->clock = dtd->part1.clock * 10;
mode.clock = dtd->part1.clock * 10;
 
mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
mode.flags |= DRM_MODE_FLAG_INTERLACE;
if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
mode.flags |= DRM_MODE_FLAG_PHSYNC;
else
mode.flags |= DRM_MODE_FLAG_NHSYNC;
if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
mode.flags |= DRM_MODE_FLAG_PVSYNC;
else
mode.flags |= DRM_MODE_FLAG_NVSYNC;
 
drm_mode_set_crtcinfo(&mode, 0);
 
drm_mode_copy(pmode, &mode);
}
 
static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
950,31 → 973,33
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
union hdmi_infoframe frame;
int ret;
ssize_t len;
 
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return false;
}
 
if (intel_sdvo->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_FULL;
}
 
avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
if (len < 0)
return false;
 
intel_dip_infoframe_csum(&avi_if);
 
/* sdvo spec says that the ecc is handled by the hw, and it looks like
* we must not send the ecc field, either. */
memcpy(sdvo_data, &avi_if, 3);
sdvo_data[3] = avi_if.checksum;
memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
 
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
SDVO_HBUF_TX_VSYNC,
sdvo_data, sizeof(sdvo_data));
1041,10 → 1066,36
return true;
}
 
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
{
unsigned dotclock = pipe_config->adjusted_mode.clock;
struct dpll *clock = &pipe_config->dpll;
 
/* SDVO TV has fixed PLL values that depend on its clock range;
this mirrors the VBIOS setting. */
if (dotclock >= 100000 && dotclock < 140500) {
clock->p1 = 2;
clock->p2 = 10;
clock->n = 3;
clock->m1 = 16;
clock->m2 = 8;
} else if (dotclock >= 140500 && dotclock <= 200000) {
clock->p1 = 1;
clock->p2 = 10;
clock->n = 6;
clock->m1 = 12;
clock->m2 = 8;
} else {
WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
}
 
pipe_config->clock_set = true;
}
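As an illustrative reading of the two ranges above (example value, not from this diff): a 108 MHz SDVO TV dot clock (dotclock == 108000, since adjusted_mode.clock is in kHz) falls into the first branch and gets p1=2, p2=10, n=3, m1=16, m2=8; setting pipe_config->clock_set then tells the CRTC mode-set code to program these fixed dividers instead of running its own DPLL search.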
 
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
 
1066,6 → 1117,7
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
mode,
adjusted_mode);
pipe_config->sdvo_tv_clock = true;
} else if (intel_sdvo->is_lvds) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo->sdvo_lvds_fixed_mode))
1097,6 → 1149,10
if (intel_sdvo->color_range)
pipe_config->limited_color_range = true;
 
/* Clock computation needs to happen after pixel multiplier. */
if (intel_sdvo->is_tv)
i9xx_adjust_sdvo_tv_clock(pipe_config);
 
return true;
}
 
1104,12 → 1160,11
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = intel_encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
&crtc->config.adjusted_mode;
struct drm_display_mode *mode = &crtc->config.requested_mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd, output_dtd;
1166,6 → 1221,8
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
input_dtd.part1.clock /= crtc->config.pixel_multiplier;
 
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
1172,8 → 1229,9
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
 
switch (intel_crtc->config.pixel_multiplier) {
switch (crtc->config.pixel_multiplier) {
default:
WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
1204,9 → 1262,9
}
 
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
 
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
1216,7 → 1274,7
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (intel_crtc->config.pixel_multiplier - 1)
sdvox |= (crtc->config.pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
 
1231,7 → 1289,7
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(&connector->base);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
u16 active_outputs;
u16 active_outputs = 0;
 
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
 
1246,8 → 1304,8
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
u16 active_outputs;
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
u32 tmp;
 
tmp = I915_READ(intel_sdvo->sdvo_reg);
1264,10 → 1322,77
return true;
}
 
static void intel_sdvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
u32 flags = 0, sdvox;
u8 val;
bool ret;
 
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
/* Some sdvo encoders are not spec compliant and don't
* implement the mandatory get_timings function. */
DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
} else {
if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
 
if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
}
 
pipe_config->adjusted_mode.flags |= flags;
 
/*
* Pixel multiplier readout is tricky: only on i915g/gm is it stored in
* the sdvo port register; on all other platforms it is part of the dpll
* state. Since the general pipe state readout happens before
* encoder->get_config, we already have a valid pixel multiplier on all
* other platforms.
*/
if (IS_I915G(dev) || IS_I915GM(dev)) {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
 
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
&val, 1)) {
switch (val) {
case SDVO_CLOCK_RATE_MULT_1X:
encoder_pixel_multiplier = 1;
break;
case SDVO_CLOCK_RATE_MULT_2X:
encoder_pixel_multiplier = 2;
break;
case SDVO_CLOCK_RATE_MULT_4X:
encoder_pixel_multiplier = 4;
break;
}
}
 
WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
}
 
static void intel_disable_sdvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u32 temp;
 
intel_sdvo_set_active_outputs(intel_sdvo, 0);
1309,7 → 1434,7
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
bool input1, input2;
1344,6 → 1469,7
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
 
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
{
struct drm_crtc *crtc;
1365,6 → 1491,8
return;
}
 
/* We set active outputs manually below in case pipe dpms doesn't change
* due to cloning. */
if (mode != DRM_MODE_DPMS_ON) {
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
1467,7 → 1595,7
 
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
&intel_sdvo->hotplug_active, 2);
1495,7 → 1623,7
 
return drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
dev_priv->crt_ddc_pin));
dev_priv->vbt.crt_ddc_pin));
}
 
static enum drm_connector_status
1580,6 → 1708,9
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
 
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
&response, 2))
1625,12 → 1756,9
if (ret == connector_status_connected) {
intel_sdvo->is_tv = false;
intel_sdvo->is_lvds = false;
intel_sdvo->base.needs_tv_clock = false;
 
if (response & SDVO_TV_MASK) {
if (response & SDVO_TV_MASK)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
}
if (response & SDVO_LVDS_MASK)
intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
}
1772,21 → 1900,12
struct drm_display_mode *newmode;
 
/*
* Attempt to get the mode list from DDC.
* Assume that the preferred modes are
* arranged in priority order.
*/
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
 
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
* SDVO->LVDS transcoders can't cope with the EDID mode. Since
* drm_mode_probed_add adds the mode at the head of the list we add it
* last.
* SDVO->LVDS transcoders can't cope with the EDID mode.
*/
if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
newmode = drm_mode_duplicate(connector->dev,
dev_priv->sdvo_lvds_vbt_mode);
dev_priv->vbt.sdvo_lvds_vbt_mode);
if (newmode != NULL) {
/* Guarantee the mode is preferred */
newmode->type = (DRM_MODE_TYPE_PREFERRED |
1795,6 → 1914,13
}
}
 
/*
* Attempt to get the mode list from DDC.
* Assume that the preferred modes are
* arranged in priority order.
*/
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
 
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
2076,7 → 2202,7
 
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
 
if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
2329,7 → 2455,6
intel_sdvo_connector->output_flag = type;
 
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
 
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
2417,7 → 2542,6
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
{
intel_sdvo->is_tv = false;
intel_sdvo->base.needs_tv_clock = false;
intel_sdvo->is_lvds = false;
 
/* SDVO requires that an XXX1 function may not exist unless the corresponding XXX0 function also exists. */
2751,7 → 2875,6
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
u32 hotplug_mask;
int i;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
2780,23 → 2903,12
}
}
 
hotplug_mask = 0;
if (IS_G4X(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
} else if (IS_GEN4(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
} else {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
}
 
intel_encoder->compute_config = intel_sdvo_compute_config;
intel_encoder->disable = intel_disable_sdvo;
intel_encoder->mode_set = intel_sdvo_mode_set;
intel_encoder->enable = intel_enable_sdvo;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
intel_encoder->get_config = intel_sdvo_get_config;
 
/* In the default case sdvo lvds is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
/drivers/video/drm/i915/intel_sideband.c
0,0 → 1,177
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
 
#include "i915_drv.h"
#include "intel_drv.h"
 
/* IOSF sideband */
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
u32 port, u32 opcode, u32 addr, u32 *val)
{
u32 cmd, be = 0xf, bar = 0;
bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
opcode == DPIO_OPCODE_REG_READ);
 
cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
(bar << IOSF_BAR_SHIFT);
 
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
is_read ? "read" : "write");
return -EAGAIN;
}
 
I915_WRITE(VLV_IOSF_ADDR, addr);
if (!is_read)
I915_WRITE(VLV_IOSF_DATA, *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
 
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
is_read ? "read" : "write");
return -ETIMEDOUT;
}
 
if (is_read)
*val = I915_READ(VLV_IOSF_DATA);
I915_WRITE(VLV_IOSF_DATA, 0);
 
return 0;
}
 
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
PUNIT_OPCODE_REG_READ, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
 
return val;
}
 
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
PUNIT_OPCODE_REG_WRITE, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
}
 
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
PUNIT_OPCODE_REG_READ, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
 
return val;
}
 
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
u32 val = 0;
 
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
DPIO_OPCODE_REG_READ, reg, &val);
 
return val;
}
 
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
DPIO_OPCODE_REG_WRITE, reg, &val);
}
 
/* SBI access */
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return 0;
}
 
I915_WRITE(SBI_ADDR, (reg << 16));
 
if (destination == SBI_ICLK)
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
else
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
return 0;
}
 
return I915_READ(SBI_DATA);
}
 
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
u32 tmp;
 
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return;
}
 
I915_WRITE(SBI_ADDR, (reg << 16));
I915_WRITE(SBI_DATA, value);
 
if (destination == SBI_ICLK)
tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
else
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
return;
}
}
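For orientation, a typical caller performs a read-modify-write with dpio_lock held; a minimal sketch (the register and bit names are only illustrative here, borrowed from the usual PCH clocking code rather than from this file):
 
u32 tmp;
 
mutex_lock(&dev_priv->dpio_lock);
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
tmp |= SBI_SSCCTL_DISABLE; /* gate the SSC clock output */
intel_sbi_write(dev_priv, SBI_SSCCTL6, tmp, SBI_ICLK);
mutex_unlock(&dev_priv->dpio_lock);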
/drivers/video/drm/i915/intel_sprite.c
32,12 → 32,14
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
107,6 → 109,9
 
sprctl |= SP_ENABLE;
 
intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
src_w--;
src_h--;
113,8 → 118,6
crtc_w--;
crtc_h--;
 
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
 
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
132,13 → 135,13
 
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
 
static void
vlv_disable_plane(struct drm_plane *dplane)
vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
151,6 → 154,8
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
 
intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
}
 
static int
205,7 → 210,8
}
 
static void
ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
254,13 → 260,19
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
 
/* must disable */
if (IS_HASWELL(dev))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
 
sprctl |= SPRITE_ENABLE;
 
if (IS_HASWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
src_w--;
src_h--;
267,8 → 279,6
crtc_w--;
crtc_h--;
 
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
 
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
307,7 → 317,8
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
I915_MODIFY_DISPBASE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
 
/* potentially re-enable LP watermarks */
316,7 → 327,7
}
 
static void
ivb_disable_plane(struct drm_plane *plane)
ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
334,6 → 345,8
 
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
 
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
 
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
intel_update_watermarks(dev);
394,7 → 407,8
}
 
static void
ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
446,6 → 460,9
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
 
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
src_w--;
src_h--;
452,8 → 469,6
crtc_w--;
crtc_h--;
 
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
 
dvsscale = 0;
if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
475,12 → 490,13
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
I915_MODIFY_DISPBASE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
 
static void
ilk_disable_plane(struct drm_plane *plane)
ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
493,6 → 509,8
/* Flush double buffered register updates */
I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
 
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
}
 
static void
583,6 → 601,20
key->flags = I915_SET_COLORKEY_NONE;
}
 
static bool
format_is_yuv(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YVYU:
return true;
default:
return false;
}
}
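Side note (added for clarity, not part of the diff): the four packed formats above are YUV 4:2:2, so two horizontally adjacent pixels share one 32-bit macropixel; this is why the clipping code below forces src_x and src_w to even values for these formats.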
 
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
600,9 → 632,29
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
bool disable_primary = false;
bool visible;
int hscale, vscale;
int max_scale, min_scale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
struct drm_rect src = {
/* sample coordinates in 16.16 fixed point */
.x1 = src_x,
.x2 = src_x + src_w,
.y1 = src_y,
.y2 = src_y + src_h,
};
struct drm_rect dst = {
/* integer pixels */
.x1 = crtc_x,
.x2 = crtc_x + crtc_w,
.y1 = crtc_y,
.y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
.x2 = crtc->mode.hdisplay,
.y2 = crtc->mode.vdisplay,
};
 
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
618,19 → 670,23
intel_plane->src_w = src_w;
intel_plane->src_h = src_h;
 
src_w = src_w >> 16;
src_h = src_h >> 16;
 
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
DRM_DEBUG_KMS("Pipe disabled\n");
return -EINVAL;
}
 
if (crtc_x >= primary_w || crtc_y >= primary_h)
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
return -EINVAL;
}
 
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe)
/* FIXME check all gen limits */
if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > 16384) {
DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
return -EINVAL;
}
 
/* Sprite planes can be linear or x-tiled surfaces */
switch (obj->tiling_mode) {
638,55 → 694,123
case I915_TILING_X:
break;
default:
DRM_DEBUG_KMS("Unsupported tiling mode\n");
return -EINVAL;
}
 
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
* The caller must handle that by adjusting source offset and size.
* FIXME the following code does a bunch of fuzzy adjustments to the
* coordinates and sizes. We probably need some way to decide whether
* more strict checking should be done instead.
*/
if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
crtc_w += crtc_x;
crtc_x = 0;
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
 
hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
BUG_ON(hscale < 0);
 
vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
BUG_ON(vscale < 0);
 
visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
 
crtc_x = dst.x1;
crtc_y = dst.y1;
crtc_w = drm_rect_width(&dst);
crtc_h = drm_rect_height(&dst);
 
if (visible) {
/* check again in case clipping clamped the results */
hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
if (hscale < 0) {
DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
drm_rect_debug_print(&src, true);
drm_rect_debug_print(&dst, false);
 
return hscale;
}
if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
goto out;
if ((crtc_x + crtc_w) > primary_w)
crtc_w = primary_w - crtc_x;
 
if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
crtc_h += crtc_y;
crtc_y = 0;
vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
if (vscale < 0) {
DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
drm_rect_debug_print(&src, true);
drm_rect_debug_print(&dst, false);
 
return vscale;
}
if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
goto out;
if (crtc_y + crtc_h > primary_h)
crtc_h = primary_h - crtc_y;
 
if (!crtc_w || !crtc_h) /* Again, nothing to display */
goto out;
/* Make the source viewport size an exact multiple of the scaling factors. */
drm_rect_adjust_size(&src,
drm_rect_width(&dst) * hscale - drm_rect_width(&src),
drm_rect_height(&dst) * vscale - drm_rect_height(&src));
 
/* sanity check to make sure the src viewport wasn't enlarged */
WARN_ON(src.x1 < (int) src_x ||
src.y1 < (int) src_y ||
src.x2 > (int) (src_x + src_w) ||
src.y2 > (int) (src_y + src_h));
 
/*
* We may not have a scaler, eg. HSW does not have it any more
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
return -EINVAL;
src_x = src.x1 >> 16;
src_w = drm_rect_width(&src) >> 16;
src_y = src.y1 >> 16;
src_h = drm_rect_height(&src) >> 16;
 
if (format_is_yuv(fb->pixel_format)) {
src_x &= ~1;
src_w &= ~1;
 
/*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
* Must keep src and dst the
* same if we can't scale.
*/
if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
if (!intel_plane->can_scale)
crtc_w &= ~1;
 
if (crtc_w == 0)
visible = false;
}
}
 
/* Check size restrictions when scaling */
if (visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
 
WARN_ON(!intel_plane->can_scale);
 
/* FIXME interlacing min height is 6 */
 
if (crtc_w < 3 || crtc_h < 3)
visible = false;
 
if (src_w < 3 || src_h < 3)
visible = false;
 
width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
 
if (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
}
}
 
dst.x1 = crtc_x;
dst.x2 = crtc_x + crtc_w;
dst.y1 = crtc_y;
dst.y2 = crtc_y + crtc_h;
 
/*
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
if ((crtc_x == 0) && (crtc_y == 0) &&
(crtc_w == primary_w) && (crtc_h == primary_h))
disable_primary = true;
disable_primary = drm_rect_equals(&dst, &clip);
WARN_ON(disable_primary && !visible);
 
mutex_lock(&dev->struct_mutex);
 
708,8 → 832,12
if (!disable_primary)
intel_enable_primary(crtc);
 
intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
crtc_w, crtc_h, x, y, src_w, src_h);
if (visible)
intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
else
intel_plane->disable_plane(plane, crtc);
 
if (disable_primary)
intel_disable_primary(crtc);
732,7 → 860,6
 
out_unlock:
mutex_unlock(&dev->struct_mutex);
out:
return ret;
}
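A quick worked example of the scale factors used above (illustrative numbers): drm_rect_calc_hscale() returns the source/destination width ratio in 16.16 fixed point, the source rectangle already being in 16.16 coordinates. Fitting a 1920-pixel-wide source into a 960-pixel-wide destination therefore gives hscale = (1920 << 16) / 960 = 2 << 16, a 2x downscale, which must stay within [min_scale, max_scale], i.e. at most intel_plane->max_downscale << 16.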
 
743,9 → 870,14
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
 
if (plane->crtc)
if (!plane->fb)
return 0;
 
if (WARN_ON(!plane->crtc))
return -EINVAL;
 
intel_enable_primary(plane->crtc);
intel_plane->disable_plane(plane);
intel_plane->disable_plane(plane, plane->crtc);
 
if (!intel_plane->obj)
goto out;
845,6 → 977,14
intel_plane->src_w, intel_plane->src_h);
}
 
void intel_plane_disable(struct drm_plane *plane)
{
if (!plane->crtc || !plane->fb)
return;
 
intel_disable_plane(plane);
}
 
static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,
918,13 → 1058,15
break;
 
case 7:
if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
if (IS_IVYBRIDGE(dev)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
intel_plane->can_scale = false;
else
intel_plane->can_scale = true;
intel_plane->max_downscale = 1;
}
 
if (IS_VALLEYVIEW(dev)) {
intel_plane->max_downscale = 1;
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
intel_plane->update_colorkey = vlv_update_colorkey;
933,7 → 1075,6
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
} else {
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
/drivers/video/drm/i915/intel_uncore.c
0,0 → 1,600
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
 
#include "i915_drv.h"
#include "intel_drv.h"
 
#define FORCEWAKE_ACK_TIMEOUT_MS 2
 
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
 
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
 
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
 
#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
 
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
 
 
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
u32 gt_thread_status_mask;
 
if (IS_HASWELL(dev_priv->dev))
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
else
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
 
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
 
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
}
 
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE, 1);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
/* WaRsForcewakeWaitTC0:snb */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
}
 
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
 
if (IS_HASWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_MT_ACK;
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
/* WaRsForcewakeWaitTC0:ivb,hsw */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
u32 gtfifodbg;
 
gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
"MMIO read or write has been dropped %x\n", gtfifodbg))
__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
 
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
 
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
 
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
dev_priv->uncore.fifo_count = fifo;
}
dev_priv->uncore.fifo_count--;
 
return ret;
}
 
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
 
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
 
/* WaRsForcewakeWaitTC0:vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
 
void intel_uncore_early_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
 
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
} else if (IS_HASWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
 
/* IVB configs may use multi-threaded forcewake */
 
/* A small trick here - if the bios hasn't configured
* MT forcewake, and if the device is in RC6, then
* force_wake_mt_get will not wake the device and the
* ECOBUS read will return zero, which will be
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
 
if (ecobus & FORCEWAKE_MT_ENABLE) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_mt_put;
} else {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
} else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
}
 
static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_VALLEYVIEW(dev)) {
vlv_force_wake_reset(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
}
 
void intel_uncore_sanitize(struct drm_device *dev)
{
intel_uncore_forcewake_reset(dev);
 
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
}
 
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
/*
* see gen6_gt_force_wake_get()
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
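A minimal usage sketch of the pair above (caller code is illustrative, not taken from this diff): any register sequence that must not race with an RC6 power-down is bracketed by get/put, and the reference count makes nesting safe.
 
gen6_gt_force_wake_get(dev_priv);
/* ... several I915_READ()/I915_WRITE() accesses that must all
 * observe the GT awake ... */
gen6_gt_force_wake_put(dev_priv);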
 
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
 
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
__raw_i915_write32(dev_priv, MI_MODE, 0);
}
 
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
 
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed write to %x\n", reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
 
#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
unsigned long irqflags; \
u##x val = 0; \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
if (dev_priv->info->gen == 5) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv); \
val = __raw_i915_read##x(dev_priv, reg); \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_put(dev_priv); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
return val; \
}
 
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read
 
#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
unsigned long irqflags; \
u32 __fifo_ret = 0; \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
if (dev_priv->info->gen == 5) \
ilk_dummy_write(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
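For readability, roughly what one instantiation of the generators above produces (a sketch, bodies elided): __i915_read(32) and __i915_write(32) emit
 
u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg, bool trace);
void i915_write32(struct drm_i915_private *dev_priv, u32 reg, u32 val, bool trace);
 
where the read takes a temporary forcewake reference for registers that satisfy NEEDS_FORCE_WAKE(), and the write additionally waits for GT FIFO space and performs the HSW unclaimed-register checks.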
 
static const struct register_whitelist {
uint64_t offset;
uint32_t size;
uint32_t gen_bitmask; /* supported gens: 0x10 for gen 4, 0x30 for gens 4 and 5, etc. */
} whitelist[] = {
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
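To make the gen_bitmask encoding concrete (a worked example using the single entry above): the ioctl below admits a register when (1 << gen) intersects the mask, so 0xF0 covers gens 4 through 7. On an Ivybridge part (gen 7) the test evaluates to (1 << 7) & 0xF0 == 0x80, which is non-zero, so 8-byte RING_TIMESTAMP reads are permitted.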
 
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
int i;
 
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (entry->offset == reg->offset &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
 
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
 
switch (entry->size) {
case 8:
reg->val = I915_READ64(reg->offset);
break;
case 4:
reg->val = I915_READ(reg->offset);
break;
case 2:
reg->val = I915_READ16(reg->offset);
break;
case 1:
reg->val = I915_READ8(reg->offset);
break;
default:
WARN_ON(1);
return -EINVAL;
}
 
return 0;
}
 
static int i8xx_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_I85X(dev))
return -ENODEV;
 
I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
POSTING_READ(D_STATE);
 
if (IS_I830(dev) || IS_845G(dev)) {
I915_WRITE(DEBUG_RESET_I830,
DEBUG_RESET_DISPLAY |
DEBUG_RESET_RENDER |
DEBUG_RESET_FULL);
POSTING_READ(DEBUG_RESET_I830);
msleep(1);
 
I915_WRITE(DEBUG_RESET_I830, 0);
POSTING_READ(DEBUG_RESET_I830);
}
 
msleep(1);
 
I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
POSTING_READ(D_STATE);
 
return 0;
}
 
static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
 
static int i965_do_reset(struct drm_device *dev)
{
int ret;
 
/*
* Set the domains we want to reset (GRDOM/bits 2 and 3) as
* well as the reset bit (GR/bit 0). Setting the GR bit
* triggers the reset; when done, the hardware will clear it.
*/
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
 
/* We can't reset render&media without also resetting display ... */
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
 
pci_write_config_byte(dev->pdev, I965_GDRST, 0);
 
return 0;
}
 
static int ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 gdrst;
int ret;
 
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
if (ret)
return ret;
 
/* We can't reset render&media without also resetting display ... */
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
 
static int gen6_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
unsigned long irqflags;
 
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
/* Reset the chip */
 
/* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
 
/* Spin waiting for the device to ack the reset request */
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
 
intel_uncore_forcewake_reset(dev);
 
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
else
dev_priv->uncore.funcs.force_wake_put(dev_priv);
 
/* Restore fifo count */
dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return ret;
}
 
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
case 4: return i965_do_reset(dev);
case 2: return i8xx_do_reset(dev);
default: return -ENODEV;
}
}
 
void intel_uncore_clear_errors(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* XXX needs spinlock around caller's grouping */
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
 
void intel_uncore_check_errors(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed register before interrupt\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
/drivers/video/drm/i915/kms_display.c
17,9 → 17,6
 
#include "bitmap.h"
 
extern struct drm_device *main_device;
 
 
typedef struct
{
kobj_t header;
147,7 → 144,6
 
do_set:
 
 
encoder = connector->encoder;
crtc = encoder->crtc;
 
365,8 → 361,6
};
safe_sti(ifl);
 
main_device = dev;
 
#ifdef __HWA__
err = init_bitmaps();
#endif
471,16 → 465,23
if (unlikely(obj == NULL))
return -ENOMEM;
 
ret = i915_gem_object_pin(obj, CURSOR_WIDTH*CURSOR_HEIGHT*4, true, true);
ret = i915_gem_obj_ggtt_pin(obj, CURSOR_WIDTH*CURSOR_HEIGHT*4, true, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
}
 
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
{
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}
/* You don't need to worry about fragmentation issues.
* GTT space is contiguous. I guarantee it. */
 
mapped = bits = (u32*)MapIoMem(dev_priv->gtt.mappable_base + obj->gtt_offset,
mapped = bits = (u32*)MapIoMem(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
CURSOR_WIDTH*CURSOR_HEIGHT*4, PG_SW);
 
if (unlikely(bits == NULL))
612,7 → 613,7
os_display->cursor = cursor;
 
if (!dev_priv->info->cursor_needs_physical)
intel_crtc->cursor_addr = cursor->cobj->gtt_offset;
intel_crtc->cursor_addr = i915_gem_obj_ggtt_offset(cursor->cobj);
else
intel_crtc->cursor_addr = (addr_t)cursor->cobj;
 
855,13 → 856,20
ts->tv_nsec = (tmp - ts->tv_sec*100)*10000000;
}
 
void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
while (nsec >= NSEC_PER_SEC) {
/*
* The following asm() prevents the compiler from
* optimising this loop into a modulo operation. See
* also __iter_div_u64_rem() in include/linux/time.h
*/
asm("" : "+rm"(nsec));
nsec -= NSEC_PER_SEC;
++sec;
}
while (nsec < 0) {
asm("" : "+rm"(nsec));
nsec += NSEC_PER_SEC;
--sec;
}
869,7 → 877,6
ts->tv_nsec = nsec;
}
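A small worked example of the normalization above (illustrative values): set_normalized_timespec(&ts, 1, 1500000000LL) runs the first loop once and leaves ts = { .tv_sec = 2, .tv_nsec = 500000000 }; a negative nsec is pulled back into range symmetrically by the second loop.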
 
 
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
/drivers/video/drm/i915/main.c
26,8 → 26,8
uint8_t revision;
};
 
extern struct drm_device *main_device;
extern struct drm_file *drm_file_handlers[256];
struct drm_device *main_device;
struct drm_file *drm_file_handlers[256];
 
void cpu_detect();
 
63,7 → 63,6
 
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
 
int err = 0;
 
if(action != 1)
80,8 → 79,9
 
if(!dbg_open(log))
{
strcpy(log, "/tmp1/1/i915.log");
// strcpy(log, "/tmp1/1/i915.log");
// strcpy(log, "/RD/1/DRIVERS/i915.log");
strcpy(log, "/BD1/4/i915.log");
 
if(!dbg_open(log))
{
89,7 → 89,7
return 0;
};
}
dbgprintf(" i915 v3.10\n cmdline: %s\n", cmdline);
dbgprintf(" i915 v3.12-6\n cmdline: %s\n", cmdline);
 
cpu_detect();
// dbgprintf("\ncache line size %d\n", x86_clflush_size);
97,12 → 97,12
enum_pci_devices();
 
err = i915_init();
 
if(err)
{
dbgprintf("Epic Fail :(\n");
return 0;
};
init_display_kms(main_device);
 
err = RegService("DISPLAY", display_handler);
 
110,7 → 110,9
dbgprintf("Set DISPLAY handler\n");
 
struct drm_i915_private *dev_priv = main_device->dev_private;
 
driver_wq_state = 1;
 
run_workqueue(dev_priv->wq);
 
return err;
/drivers/video/drm/i915/utils.c
104,24 → 104,8
kfree(filep->pages);
}
 
/**
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
* @frame: HDMI AVI infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
 
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = 13;
 
return 0;
}
 
 
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
while (bytes) {
509,4 → 493,14
buf, len, true);
}
 
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
void *p;
 
p = kmalloc(len, gfp);
if (p)
memcpy(p, src, len);
return p;
}