Subversion Repositories: Kolibri OS

Compare Revisions: Rev 1877 → Rev 1963

/drivers/video/drm/drm_crtc.c
30,9 → 30,11
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/list.h>
#include <linux/slab.h>
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"
 
struct drm_prop_enum_list {
int type;
78,6 → 80,7
{
{ DRM_MODE_DITHERING_OFF, "Off" },
{ DRM_MODE_DITHERING_ON, "On" },
{ DRM_MODE_DITHERING_AUTO, "Automatic" },
};
 
/*
153,12 → 156,12
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
{ DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
{ DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
};
 
static struct drm_prop_enum_list drm_encoder_enum_list[] =
493,7 → 496,6
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
 
kfree(connector->fb_helper_private);
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
857,7 → 859,6
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
1074,6 → 1075,9
uint32_t __user *encoder_id;
struct drm_mode_group *mode_group;
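
/* New in this revision: every KMS ioctl below gains this guard so
* drivers without modeset support fail early with -EINVAL instead
* of touching mode_config state that was never initialized. */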
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
/*
1111,7 → 1115,7
if (card_res->count_fbs >= fb_count) {
copied = 0;
fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
list_for_each_entry(fb, &file_priv->fbs, head) {
list_for_each_entry(fb, &file_priv->fbs, filp_head) {
if (put_user(fb->base.id, fb_id + copied)) {
ret = -EFAULT;
goto out;
1128,7 → 1132,7
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
head) {
DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT;
goto out;
1156,8 → 1160,8
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
head) {
DRM_DEBUG_KMS("ENCODER ID is %d\n",
encoder->base.id);
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
drm_get_encoder_name(encoder));
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
1187,8 → 1191,9
list_for_each_entry(connector,
&dev->mode_config.connector_list,
head) {
DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
connector->base.id);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector));
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
1211,7 → 1216,7
}
card_res->count_connectors = connector_count;
 
DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
card_res->count_connectors, card_res->count_encoders);
 
out:
1244,6 → 1249,9
struct drm_mode_object *obj;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
1312,9 → 1320,12
uint64_t __user *prop_values;
uint32_t __user *encoder_ptr;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
 
DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
 
mutex_lock(&dev->mode_config.mutex);
 
1431,6 → 1442,9
struct drm_encoder *encoder;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
1486,6 → 1500,9
int ret = 0;
int i;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
1495,6 → 1512,7
goto out;
}
crtc = obj_to_crtc(obj);
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
1571,6 → 1589,9
goto out;
}
connector = obj_to_connector(obj);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector));
 
connector_set[i] = connector;
}
1599,6 → 1620,9
struct drm_crtc *crtc;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
if (!req->flags) {
DRM_ERROR("no operation set\n");
return -EINVAL;
1663,6 → 1687,9
struct drm_framebuffer *fb;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_ERROR("mode new framebuffer width not within limits\n");
return -EINVAL;
1674,18 → 1701,19
 
mutex_lock(&dev->mode_config.mutex);
 
/* TODO check buffer is sufficently large */
/* TODO check buffer is sufficiently large */
/* TODO setup destructor callback */
 
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (!fb) {
if (IS_ERR(fb)) {
DRM_ERROR("could not create framebuffer\n");
ret = -EINVAL;
ret = PTR_ERR(fb);
goto out;
}
 
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
 
out:
mutex_unlock(&dev->mode_config.mutex);
1719,9 → 1747,12
int ret = 0;
int found = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
/* TODO check that we realy get a framebuffer back. */
/* TODO check that we really get a framebuffer back. */
if (!obj) {
DRM_ERROR("mode invalid framebuffer id\n");
ret = -EINVAL;
1775,6 → 1806,9
struct drm_framebuffer *fb;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
1808,6 → 1842,9
int num_clips;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
1842,12 → 1879,15
 
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
if (ret)
if (ret) {
ret = -EFAULT;
goto out_err2;
}
}
 
if (fb->funcs->dirty) {
ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
} else {
ret = -ENOSYS;
goto out_err2;
1991,6 → 2031,9
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
2037,6 → 2080,9
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
2208,6 → 2254,9
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
2332,6 → 2381,9
int ret = 0;
void *blob_ptr;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
2359,7 → 2411,7
struct edid *edid)
{
struct drm_device *dev = connector->dev;
int ret = 0;
int ret = 0, size;
 
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
2371,7 → 2423,9
return ret;
}
 
connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
size = EDID_LENGTH * (1 + edid->extensions);
connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
size, edid);
 
ret = drm_connector_property_set_value(connector,
dev->mode_config.edid_property,
2392,6 → 2446,9
int ret = -EINVAL;
int i;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
2510,6 → 2567,9
int size;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
2543,7 → 2603,7
goto out;
}
 
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
 
out:
mutex_unlock(&dev->mode_config.mutex);
2561,6 → 2621,9
int size;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
2601,3 → 2664,22
#endif
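
/**
* drm_mode_config_reset - reset mode objects to a known state
* @dev: DRM device
*
* Walks all CRTCs, encoders and connectors and calls their optional
* ->reset() hooks so the driver can restore sane software state,
* e.g. at load time or on resume. A hypothetical resume path:
*
*     drm_mode_config_reset(dev);
*     drm_helper_resume_force_mode(dev);
*/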
 
 
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->funcs->reset)
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
/drivers/video/drm/drm_crtc_helper.c
34,6 → 34,8
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
 
static bool drm_kms_helper_poll = true;
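/* Gates the connector output-polling helpers at the end of this
* file: when false, output_poll_execute() and the poll enable/HPD
* helpers return early. */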
 
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
55,7 → 57,7
}
 
/**
* drm_helper_probe_connector_modes - get complete set of display modes
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @dev: DRM device
* @maxX: max width for modes
* @maxY: max height for modes
86,7 → 88,8
int count = 0;
int mode_flags = 0;
 
DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector));
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
/* set all modes to the unverified state */
list_for_each_entry_safe(mode, t, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
98,21 → 101,23
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
} else
connector->status = connector->funcs->detect(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
// drm_kms_helper_poll_enable(dev);
}
 
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("%s is disconnected\n",
drm_get_connector_name(connector));
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
drm_mode_connector_update_edid_property(connector, NULL);
goto prune;
}
 
count = (*connector_funcs->get_modes)(connector);
if (!count) {
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
if (!count)
return 0;
}
if (count == 0)
goto prune;
 
drm_mode_connector_list_update(connector);
 
140,7 → 145,7
 
drm_mode_sort(&connector->modes);
 
DRM_DEBUG_KMS("Probed modes for %s\n",
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
drm_get_connector_name(connector));
list_for_each_entry_safe(mode, t, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
153,21 → 158,6
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
 
int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
uint32_t maxY)
{
struct drm_connector *connector;
int count = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
count += drm_helper_probe_single_connector_modes(connector,
maxX, maxY);
}
 
return count;
}
EXPORT_SYMBOL(drm_helper_probe_connector_modes);
 
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
215,6 → 205,17
}
EXPORT_SYMBOL(drm_helper_crtc_in_use);
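
/* Prefer an encoder's dedicated ->disable() hook when the helper
* vtable provides one; otherwise fall back to forcing DPMS off.
* Shared by the unused-function cleanup and the pre-modeset
* prepare path below. */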
 
static void
drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
}
 
/**
* drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
229,7 → 230,6
{
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc *crtc;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
240,12 → 240,8
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder_funcs = encoder->helper_private;
if (!drm_helper_encoder_in_use(encoder)) {
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
drm_encoder_disable(encoder);
/* disconnect encoder from any connector */
encoder->crtc = NULL;
}
255,7 → 251,10
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc_funcs->disable)
(*crtc_funcs->disable)(crtc);
else
(*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
crtc->fb = NULL;
}
}
262,302 → 261,6
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
{
struct drm_display_mode *mode;
 
list_for_each_entry(mode, &connector->modes, head) {
if (drm_mode_width(mode) > width ||
drm_mode_height(mode) > height)
continue;
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return mode;
}
return NULL;
}
 
static bool drm_has_cmdline_mode(struct drm_connector *connector)
{
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
 
if (!fb_help_conn)
return false;
 
cmdline_mode = &fb_help_conn->cmdline_mode;
return cmdline_mode->specified;
}
 
static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
{
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode = NULL;
 
if (!fb_help_conn)
return mode;
 
cmdline_mode = &fb_help_conn->cmdline_mode;
if (cmdline_mode->specified == false)
return mode;
 
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
*/
if (cmdline_mode->rb || cmdline_mode->margins)
goto create_mode;
 
list_for_each_entry(mode, &connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
mode->vdisplay != cmdline_mode->yres)
continue;
 
if (cmdline_mode->refresh_specified) {
if (mode->vrefresh != cmdline_mode->refresh)
continue;
}
 
if (cmdline_mode->interlace) {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
}
return mode;
}
 
create_mode:
mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
cmdline_mode->yres,
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->rb, cmdline_mode->interlace,
cmdline_mode->margins);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
list_add(&mode->head, &connector->modes);
return mode;
}
 
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
 
if (strict) {
enable = connector->status == connector_status_connected;
} else {
enable = connector->status != connector_status_disconnected;
}
return enable;
}
 
static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
{
bool any_enabled = false;
struct drm_connector *connector;
int i = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
enabled[i] ? "yes" : "no");
any_enabled |= enabled[i];
i++;
}
 
if (any_enabled)
return;
 
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
enabled[i] = drm_connector_enabled(connector, false);
i++;
}
}
 
static bool drm_target_preferred(struct drm_device *dev,
struct drm_display_mode **modes,
bool *enabled, int width, int height)
{
struct drm_connector *connector;
int i = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
if (enabled[i] == false) {
i++;
continue;
}
 
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
connector->base.id);
 
/* go for command line mode first */
modes[i] = drm_pick_cmdline_mode(connector, width, height);
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
connector->base.id);
modes[i] = drm_has_preferred_mode(connector, width, height);
}
/* No preferred modes, pick one off the list */
if (!modes[i] && !list_empty(&connector->modes)) {
list_for_each_entry(modes[i], &connector->modes, head)
break;
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
i++;
}
return true;
}
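
/* Exhaustive recursive search: for connector n, try each CRTC its
* best encoder can drive, score the assignment (connected, cmdline
* mode, preferred mode), recurse over the remaining connectors, and
* keep the highest-scoring combination. */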
 
static int drm_pick_crtcs(struct drm_device *dev,
struct drm_crtc **best_crtcs,
struct drm_display_mode **modes,
int n, int width, int height)
{
int c, o;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_crtc *best_crtc;
int my_score, best_score, score;
struct drm_crtc **crtcs, *crtc;
 
if (n == dev->mode_config.num_connector)
return 0;
c = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (c == n)
break;
c++;
}
 
best_crtcs[n] = NULL;
best_crtc = NULL;
best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
if (modes[n] == NULL)
return best_score;
 
crtcs = kmalloc(dev->mode_config.num_connector *
sizeof(struct drm_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
 
my_score = 1;
if (connector->status == connector_status_connected)
my_score++;
if (drm_has_cmdline_mode(connector))
my_score++;
if (drm_has_preferred_mode(connector, width, height))
my_score++;
 
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if (!encoder)
goto out;
 
connector->encoder = encoder;
 
/* select a crtc for this connector and then attempt to configure
remaining connectors */
c = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 
if ((encoder->possible_crtcs & (1 << c)) == 0) {
c++;
continue;
}
 
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
break;
 
if (o < n) {
/* ignore cloning for now */
c++;
continue;
}
 
crtcs[n] = crtc;
memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
width, height);
if (score > best_score) {
best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
sizeof(struct drm_crtc *));
}
c++;
}
out:
kfree(crtcs);
return best_score;
}
 
static void drm_setup_crtcs(struct drm_device *dev)
{
struct drm_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_encoder *encoder;
struct drm_connector *connector;
bool *enabled;
int width, height;
int i, ret;
 
DRM_DEBUG_KMS("\n");
 
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
 
/* clean out all the encoder/crtc combos */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder->crtc = NULL;
}
 
crtcs = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
 
drm_enable_connectors(dev, enabled);
 
ret = drm_target_preferred(dev, modes, enabled, width, height);
if (!ret)
DRM_ERROR("Unable to find initial modes\n");
 
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
 
drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
 
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
 
if (connector->encoder == NULL) {
i++;
continue;
}
 
if (mode && crtc) {
DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
mode->name, crtc->base.id);
crtc->desired_mode = mode;
connector->encoder->crtc = crtc;
} else {
connector->encoder->crtc = NULL;
connector->encoder = NULL;
}
i++;
}
 
kfree(crtcs);
kfree(modes);
kfree(enabled);
}
 
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
572,7 → 275,7
struct drm_crtc *tmp;
int crtc_mask = 1;
 
WARN(!crtc, "checking null crtc?");
WARN(!crtc, "checking null crtc?\n");
 
dev = crtc->dev;
 
602,11 → 305,11
encoder_funcs = encoder->helper_private;
/* Disable unused encoders */
if (encoder->crtc == NULL)
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
drm_encoder_disable(encoder);
/* Disable encoders whose CRTC is about to change */
if (encoder_funcs->get_crtc &&
encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
drm_encoder_disable(encoder);
}
}
 
632,7 → 335,7
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
639,13 → 342,13
struct drm_encoder *encoder;
bool ret = true;
 
adjusted_mode = drm_mode_duplicate(dev, mode);
 
crtc->enabled = drm_helper_crtc_in_use(crtc);
 
if (!crtc->enabled)
return true;
 
adjusted_mode = drm_mode_duplicate(dev, mode);
 
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
675,6 → 378,7
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
goto done;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
/* Prepare the encoders and CRTCs before setting the mode. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
702,8 → 406,9
if (encoder->crtc != crtc)
continue;
 
DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
mode->name, mode->base.id);
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
encoder->base.id, drm_get_encoder_name(encoder),
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
721,11 → 426,20
 
}
 
/* XXX free adjustedmode */
drm_mode_destroy(dev, adjusted_mode);
/* Store real post-adjustment hardware mode. */
crtc->hwmode = *adjusted_mode;
 
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc);
 
/* FIXME: add subpixel order */
done:
drm_mode_destroy(dev, adjusted_mode);
if (!ret) {
crtc->hwmode = saved_hwmode;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
765,6 → 479,7
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret = 0;
int i;
 
DRM_DEBUG_KMS("\n");
 
779,10 → 494,18
 
crtc_funcs = set->crtc->helper_private;
 
DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:"
" %d (x, y) (%i, %i)\n",
set->crtc, set->crtc->base.id, set->fb, set->connectors,
if (!set->mode)
set->fb = NULL;
 
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
set->crtc->base.id, set->fb->base.id,
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
set->mode = NULL;
set->num_connectors = 0;
}
 
dev = set->crtc->dev;
 
911,9 → 634,15
mode_changed = true;
connector->encoder->crtc = new_crtc;
}
DRM_DEBUG_KMS("setting connector %d crtc to %p\n",
connector->base.id, new_crtc);
if (new_crtc) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.id, drm_get_connector_name(connector),
new_crtc->base.id);
} else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.id, drm_get_connector_name(connector));
}
}
 
/* mode_set_base is not a required function */
if (fb_changed && !crtc_funcs->mode_set_base)
920,26 → 649,29
mode_changed = true;
 
if (mode_changed) {
old_fb = set->crtc->fb;
set->crtc->fb = set->fb;
set->crtc->enabled = (set->mode != NULL);
if (set->mode != NULL) {
set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
if (set->crtc->enabled) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
old_fb = set->crtc->fb;
set->crtc->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
old_fb)) {
DRM_ERROR("failed to set mode on crtc %p\n",
set->crtc);
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
set->crtc->fb = old_fb;
ret = -EINVAL;
goto fail;
}
/* TODO are these needed? */
set->crtc->desired_x = set->x;
set->crtc->desired_y = set->y;
set->crtc->desired_mode = set->mode;
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
}
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
set->crtc->x = set->x;
950,9 → 682,11
set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, old_fb);
if (ret != 0)
if (ret != 0) {
set->crtc->fb = old_fb;
goto fail;
}
}
 
kfree(save_connectors);
kfree(save_encoders);
983,63 → 717,6
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
 
bool drm_helper_plugged_event(struct drm_device *dev)
{
DRM_DEBUG_KMS("\n");
 
drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
dev->mode_config.max_height);
 
drm_setup_crtcs(dev);
 
/* alert the driver fb layer */
dev->mode_config.funcs->fb_changed(dev);
 
/* FIXME: send hotplug event */
return true;
}
/**
* drm_initial_config - setup a sane initial connector configuration
* @dev: DRM device
*
* LOCKING:
* Called at init time, must take mode config lock.
*
* Scan the CRTCs and connectors and try to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
bool drm_helper_initial_config(struct drm_device *dev)
{
int count = 0;
 
/* disable all the possible outputs/crtcs before entering KMS mode */
// drm_helper_disable_unused_functions(dev);
 
// drm_fb_helper_parse_command_line(dev);
 
count = drm_helper_probe_connector_modes(dev,
dev->mode_config.max_width,
dev->mode_config.max_height);
 
/*
* we shouldn't end up with no modes here.
*/
if (count == 0)
printk(KERN_INFO "No connectors reported connected with modes\n");
 
drm_setup_crtcs(dev);
 
/* alert the driver fb layer */
dev->mode_config.funcs->fb_changed(dev);
 
return 0;
}
EXPORT_SYMBOL(drm_helper_initial_config);
 
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
1122,27 → 799,6
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
 
/**
* drm_hotplug_stage_two
* @dev DRM device
* @connector hotplugged connector
*
* LOCKING:
* Caller must hold mode config lock, function might grab struct lock.
*
* Stage two of a hotplug.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_helper_hotplug_stage_two(struct drm_device *dev)
{
drm_helper_plugged_event(dev);
 
return 0;
}
EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
 
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
{
1186,6 → 842,7
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
}
 
crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
1193,9 → 850,119
drm_helper_choose_crtc_dpms(crtc));
}
}
}
/* disable the unused connectors while restoring the modesetting */
drm_helper_disable_unused_functions(dev);
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
 
#if 0
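
/* Connector polling is compiled out in this port; the upstream
* helpers below are retained for reference but never built. */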
 
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
enum drm_connector_status old_status;
bool repoll = false, changed = false;
 
if (!drm_kms_helper_poll)
return;
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
/* if this is HPD or polled don't check it -
TV out for instance */
if (!connector->polled)
continue;
 
else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
repoll = true;
 
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
!(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
 
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
connector->base.id,
drm_get_connector_name(connector),
old_status, connector->status);
if (old_status != connector->status)
changed = true;
}
 
mutex_unlock(&dev->mode_config.mutex);
 
if (changed) {
/* send a uevent + call fbdev */
drm_sysfs_hotplug_event(dev);
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
}
 
if (repoll)
queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
 
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
 
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
return;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
}
 
if (poll)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
void drm_kms_helper_poll_init(struct drm_device *dev)
{
INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
dev->mode_config.poll_enabled = true;
 
drm_kms_helper_poll_enable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
 
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
 
/* kill timer and schedule immediate execution, this doesn't block */
cancel_delayed_work(&dev->mode_config.output_poll_work);
if (drm_kms_helper_poll)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
 
 
#endif
/drivers/video/drm/drm_dp_i2c_helper.c
23,7 → 23,6
#include <linux/kernel.h>
#include <linux/module.h>
//#include <linux/delay.h>
//#include <linux/slab.h>
//#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
/drivers/video/drm/drm_edid.c
2,6 → 2,7
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
* Copyright 2010 Red Hat, Inc.
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
27,16 → 28,20
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_edid_modes.h"
 
/*
* TODO:
* - support EDID 1.4 (incl. CE blocks)
*/
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
((edid)->version == (maj) && (edid)->revision > (min)))
 
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
#define EDID_DETAILED_TIMINGS 4
 
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
* them here (note that userspace may work around broken monitors first,
61,10 → 66,18
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
 
struct detailed_mode_closure {
struct drm_connector *connector;
struct edid *edid;
bool preferred;
u32 quirks;
int modes;
};
 
#define LEVEL_DMT 0
#define LEVEL_GTF 1
#define LEVEL_CVT 2
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
 
static struct edid_quirk {
char *vendor;
84,6 → 97,8
 
/* Envision Peripherals, Inc. EN-7100e */
{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
/* Envision EN2028 */
{ "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
 
/* Funai Electronics PM36B */
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
106,26 → 121,26
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
};
 
/*** DDC fetch and block validation ***/
 
/* Valid EDID header has these bytes */
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
 
/**
* drm_edid_is_valid - sanity check EDID data
* @edid: EDID data
*
* Sanity check the EDID block by looking at the header, the version number
* and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
* valid.
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
bool drm_edid_is_valid(struct edid *edid)
static bool
drm_edid_block_valid(u8 *raw_edid)
{
int i, score = 0;
int i;
u8 csum = 0;
u8 *raw_edid = (u8 *)edid;
struct edid *edid = (struct edid *)raw_edid;
 
if (raw_edid[0] == 0x00) {
int score = 0;
 
for (i = 0; i < sizeof(edid_header); i++)
if (raw_edid[i] == edid_header[i])
score++;
134,16 → 149,24
else if (score >= 6) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else
} else {
goto bad;
}
}
 
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
 
/* allow CEA to slide through, switches mangle this */
if (raw_edid[0] != 0x02)
goto bad;
}
 
/* per-block-type checks */
switch (raw_edid[0]) {
case 0: /* base */
if (edid->version != 1) {
DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
goto bad;
151,7 → 174,12
 
if (edid->revision > 4)
DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
break;
 
default:
break;
}
 
return 1;
 
bad:
162,9 → 190,191
}
return 0;
}
 
/**
* drm_edid_is_valid - sanity check EDID data
* @edid: EDID data
*
* Sanity-check an entire EDID record (including extensions)
*/
bool drm_edid_is_valid(struct edid *edid)
{
int i;
u8 *raw = (u8 *)edid;
 
if (!edid)
return false;
 
for (i = 0; i <= edid->extensions; i++)
if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
return false;
 
return true;
}
EXPORT_SYMBOL(drm_edid_is_valid);
 
#define DDC_ADDR 0x50
#define DDC_SEGMENT_ADDR 0x30
/**
* Get EDID information via I2C.
*
* \param adapter : i2c device adaptor
* \param buf : EDID data buffer to be filled
* \param len : EDID data buffer length
* \return 0 on success or -1 on failure.
*
* Try to fetch EDID information by calling i2c driver function.
*/
static int
drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
int ret, retries = 5;
 
/* The core i2c driver will automatically retry the transfer if the
* adapter reports EAGAIN. However, we find that bit-banging transfers
* are susceptible to errors under a heavily loaded machine and
* generate spurious NAKs and timeouts. Retrying the transfer
* of the individual block a few times seems to overcome this.
*/
do {
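/* One EDID read is two i2c messages: write the byte offset
* (block * EDID_LENGTH), then read len bytes back from DDC_ADDR. */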
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
}
};
ret = i2c_transfer(adapter, msgs, 2);
} while (ret != 2 && --retries);
 
return ret == 2 ? 0 : -1;
}
 
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
size_t alloc_size;
 
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
 
/* base block fetch */
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block))
break;
}
if (i == 4)
goto carp;
 
/* if there are no extensions, we're done */
if (block[0x7e] == 0)
return block;
 
alloc_size = (block[0x7e] + 1) * EDID_LENGTH;
 
new = kmalloc(alloc_size, GFP_KERNEL);
 
if (!new)
goto out;
 
memcpy(new, block, EDID_LENGTH);
kfree(block);
 
block = new;
 
for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter,
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
valid_extensions++;
break;
}
}
if (i == 4)
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
}
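
/* Some extension blocks failed validation: lower the extension
* count, patch the base-block checksum byte to match the new count,
* and shrink the buffer to just the blocks that survived. */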
 
if (valid_extensions != block[0x7e]) {
block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
block[0x7e] = valid_extensions;
new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
memcpy(new, block, alloc_size);
kfree(block);
block = new;
}
 
return block;
 
carp:
dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
 
out:
kfree(block);
return NULL;
}
 
/**
* Probe DDC presence.
*
* \param adapter : i2c device adaptor
* \return 1 on success
*/
static bool
drm_probe_ddc(struct i2c_adapter *adapter)
{
unsigned char out;
 
return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
}
 
/**
* drm_get_edid - get EDID data, if available
* @connector: connector we're probing
* @adapter: i2c adapter to use for DDC
*
* Poke the given i2c channel to grab EDID data if possible. If found,
* attach it to the connector.
*
* Return edid data or NULL if we couldn't find any.
*/
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid = NULL;
 
if (drm_probe_ddc(adapter))
edid = (struct edid *)drm_do_get_edid(connector, adapter);
 
connector->display_info.raw_edid = (char *)edid;
 
return edid;
 
}
EXPORT_SYMBOL(drm_get_edid);
 
/*** EDID parsing ***/
 
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
* @edid: EDID to match
* @vendor: vendor string
208,7 → 418,6
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
 
 
/**
* edid_fixup_preferred - set preferred modes based on quirk list
* @connector: has mode list to fix up
255,254 → 464,14
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
 
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
* Modes with the reduced-blanking feature have been deleted.
*/
static struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x400@85Hz */
{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 400, 401, 404, 445, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 720x400@85Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
828, 936, 0, 400, 401, 404, 446, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x480@85Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
752, 832, 0, 480, 481, 484, 509, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@56Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@72Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@85Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1072, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@75Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
1488, 1696, 0, 768, 771, 778, 805, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@85Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@75Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
1488, 1696, 0, 800, 803, 809, 838, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@85Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@85Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@75Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@85Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@75Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
1688, 1936, 0, 900, 903, 909, 942, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@85Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@65Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@70Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@75Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@85Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@75Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@85Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@75Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@75Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@85Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@75Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@75Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@85Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 
static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
struct drm_display_mode *mode = NULL;
int i;
struct drm_display_mode *ptr, *mode;
 
mode = NULL;
for (i = 0; i < drm_num_dmt_modes; i++) {
ptr = &drm_dmt_modes[i];
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
513,7 → 482,164
}
return mode;
}
EXPORT_SYMBOL(drm_mode_find_dmt);
 
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
 
static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
int i, n = 0;
u8 rev = ext[0x01], d = ext[0x02];
u8 *det_base = ext + d;
 
switch (rev) {
case 0:
/* can't happen */
return;
case 1:
/* have to infer how many blocks we have, check pixel clock */
for (i = 0; i < 6; i++)
if (det_base[18*i] || det_base[18*i+1])
n++;
break;
default:
/* explicit count */
n = min(ext[0x03] & 0x0f, 6);
break;
}
 
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
 
static void
vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
unsigned int i, n = min((int)ext[0x02], 6);
u8 *det_base = ext + 5;
 
if (ext[0x01] != 1)
return; /* unknown version */
 
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
 
static void
drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
{
int i;
struct edid *edid = (struct edid *)raw_edid;
 
if (edid == NULL)
return;
 
for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
cb(&(edid->detailed_timings[i]), closure);
 
for (i = 1; i <= raw_edid[0x7e]; i++) {
u8 *ext = raw_edid + (i * EDID_LENGTH);
switch (*ext) {
case CEA_EXT:
cea_for_each_detailed_block(ext, cb, closure);
break;
case VTB_EXT:
vtb_for_each_detailed_block(ext, cb, closure);
break;
default:
break;
}
}
}
 
static void
is_rb(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
if (r[3] == EDID_DETAIL_MONITOR_RANGE)
if (r[15] & 0x10)
*(bool *)data = true;
}
 
/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
static bool
drm_monitor_supports_rb(struct edid *edid)
{
if (edid->revision >= 4) {
bool ret = false; /* is_rb() only ever sets this to true */
drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
return ret;
}
 
return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
}
 
static void
find_gtf2(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
*(u8 **)data = r;
}
 
/* Secondary GTF curve kicks in above some break frequency */
static int
drm_gtf2_hbreak(struct edid *edid)
{
u8 *r = NULL;
drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
return r ? (r[12] * 2) : 0;
}
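
/* The accessors below pull the secondary-curve coefficients (2C, M,
* K, 2J) from bytes 13-17 of the same range descriptor, per the
* EDID 1.3 monitor range definition. */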
 
static int
drm_gtf2_2c(struct edid *edid)
{
u8 *r = NULL;
drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
return r ? r[13] : 0;
}
 
static int
drm_gtf2_m(struct edid *edid)
{
u8 *r = NULL;
drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
return r ? (r[15] << 8) + r[14] : 0;
}
 
static int
drm_gtf2_k(struct edid *edid)
{
u8 *r = NULL;
drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
return r ? r[16] : 0;
}
 
static int
drm_gtf2_2j(struct edid *edid)
{
u8 *r = NULL;
drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
return r ? r[17] : 0;
}
 
/**
* standard_timing_level - get std. timing level(CVT/GTF/DMT)
* @edid: EDID block to scan
*/
static int standard_timing_level(struct edid *edid)
{
if (edid->revision >= 2) {
if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
return LEVEL_CVT;
if (drm_gtf2_hbreak(edid))
return LEVEL_GTF2;
return LEVEL_GTF;
}
return LEVEL_DMT;
}
 
/*
* 0 is reserved. The spec says 0x01 fill for unused timings. Some old
* monitors fill with ascii space (0x20) instead.
533,16 → 659,13
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
*
* Punts for now, but should eventually use the FB layer's CVT based mode
* generation code.
*/
struct drm_display_mode *drm_mode_std(struct drm_device *dev,
struct std_timing *t,
int revision,
int timing_level)
static struct drm_display_mode *
drm_mode_std(struct drm_connector *connector, struct edid *edid,
struct std_timing *t, int revision)
{
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
struct drm_display_mode *m, *mode = NULL;
int hsize, vsize;
int vrefresh_rate;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
549,6 → 672,7
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
int timing_level = standard_timing_level(edid);
 
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
569,18 → 693,38
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
/* HDTV hack */
if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
 
/* HDTV hack, part 1 */
if (vrefresh_rate == 60 &&
((hsize == 1360 && vsize == 765) ||
(hsize == 1368 && vsize == 769))) {
hsize = 1366;
vsize = 768;
}
 
/*
* If this connector already has a mode for this size and refresh
* rate (because it came from detailed or CVT info), use that
* instead. This way we don't have to guess at interlace or
* reduced blanking.
*/
list_for_each_entry(m, &connector->probed_modes, head)
if (m->hdisplay == hsize && m->vdisplay == vsize &&
drm_mode_vrefresh(m) == vrefresh_rate)
return NULL;
 
/* HDTV hack, part 2 */
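/* (Standard timings encode width as (value + 31) * 8, so 1366 is
* not representable exactly; CVT rounds the width as well, hence
* the manual hdisplay and sync fixups below.) */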
if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
mode->vsync_start = mode->vsync_start - 1;
mode->vsync_end = mode->vsync_end - 1;
mode->hsync_start = mode->hsync_start - 1;
mode->hsync_end = mode->hsync_end - 1;
return mode;
}
mode = NULL;
 
/* check whether it can be found in default mode table */
mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (mode)
return mode;
 
590,6 → 734,23
case LEVEL_GTF:
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
case LEVEL_GTF2:
/*
* This is potentially wrong if there's ever a monitor with
* more than one ranges section, each claiming a different
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
kfree(mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(edid),
drm_gtf2_2c(edid),
drm_gtf2_k(edid),
drm_gtf2_2j(edid));
}
break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
622,13 → 783,11
{ 1440, 576 },
{ 2880, 576 },
};
static const int n_sizes =
sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
 
if (!(pt->misc & DRM_EDID_PT_INTERLACED))
return;
 
for (i = 0; i < n_sizes; i++) {
for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
if ((mode->hdisplay == cea_interlaced[i].w) &&
(mode->vdisplay == cea_interlaced[i].h / 2)) {
mode->vdisplay *= 2;
707,15 → 866,6
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
 
/* perform the basic check for the detailed timing */
if (mode->hsync_end > mode->htotal ||
mode->vsync_end > mode->vtotal) {
drm_mode_destroy(dev, mode);
DRM_DEBUG_KMS("Incorrect detailed timing. "
"Sync is beyond the blank.\n");
return NULL;
}
 
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;
722,10 → 872,10
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
 
drm_mode_do_interlace_quirk(mode, pt);
 
drm_mode_set_name(mode);
 
drm_mode_do_interlace_quirk(mode, pt);
 
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
751,67 → 901,183
return mode;
}
 
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
(mode->hsync_end - mode->hsync_start == 32) &&
(mode->vsync_start - mode->vdisplay == 3);
}
 
static bool
mode_in_hsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
int hsync, hmin, hmax;
 
hmin = t[7];
if (edid->revision >= 4)
hmin += ((t[4] & 0x04) ? 255 : 0);
hmax = t[8];
if (edid->revision >= 4)
hmax += ((t[4] & 0x08) ? 255 : 0);
hsync = drm_mode_hsync(mode);
 
return (hsync <= hmax && hsync >= hmin);
}
 
static bool
mode_in_vsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
int vsync, vmin, vmax;
 
vmin = t[5];
if (edid->revision >= 4)
vmin += ((t[4] & 0x01) ? 255 : 0);
vmax = t[6];
if (edid->revision >= 4)
vmax += ((t[4] & 0x02) ? 255 : 0);
vsync = drm_mode_vrefresh(mode);
 
return (vsync <= vmax && vsync >= vmin);
}
 
static u32
range_pixel_clock(struct edid *edid, u8 *t)
{
/* unspecified */
if (t[9] == 0 || t[9] == 255)
return 0;
 
/* 1.4 with CVT support gives us real precision, yay */
if (edid->revision >= 4 && t[10] == 0x04)
return (t[9] * 10000) - ((t[12] >> 2) * 250);
 
/* 1.3 is pathetic, so fuzz up a bit */
return t[9] * 10000 + 5001;
}
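 
/*
 * Worked example for the math above, assuming hypothetical descriptor
 * bytes t[9] = 0x11 and t[12] = 0x28: the base limit is 17 * 10000 =
 * 170000 kHz, and the EDID 1.4 CVT byte refines it downward by
 * (0x28 >> 2) * 250 = 2500 kHz, for a precise cap of 167500 kHz.
 */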
 
static bool
mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
struct detailed_timing *timing)
{
u32 max_clock;
u8 *t = (u8 *)timing;
 
if (!mode_in_hsync_range(mode, edid, t))
return false;
 
if (!mode_in_vsync_range(mode, edid, t))
return false;
 
if ((max_clock = range_pixel_clock(edid, t)))
if (mode->clock > max_clock)
return false;
 
/* 1.4 max horizontal check */
if (edid->revision >= 4 && t[10] == 0x04)
if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
return false;
 
if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
return false;
 
return true;
}
 
/*
* Detailed mode info for the EDID "established modes" data to use.
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
static struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
846, 900, 0, 400, 421, 423, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
846, 900, 0, 400, 412, 414, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
928, 1152, 0, 624, 625, 628, 667, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
static int
drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
 
for (i = 0; i < drm_num_dmt_modes; i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
}
 
return modes;
}
 
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
 
if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->edid,
timing);
}
 
static int
add_inferred_modes(struct drm_connector *connector, struct edid *edid)
{
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
};
 
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
#define EDID_DETAILED_TIMINGS 4
if (version_greater(edid, 1, 0))
drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
&closure);
 
return closure.modes;
}
 
static int
drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
int i, j, m, modes = 0;
struct drm_display_mode *mode;
u8 *est = ((u8 *)timing) + 5;
 
for (i = 0; i < 6; i++) {
for (j = 7; j > 0; j--) {
m = (i * 8) + (7 - j);
if (m >= ARRAY_SIZE(est3_modes))
break;
if (est[i] & (1 << j)) {
mode = drm_mode_find_dmt(connector->dev,
est3_modes[m].w,
est3_modes[m].h,
est3_modes[m].r
/*, est3_modes[m].rb */);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
}
}
}
}
 
return modes;
}
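 
/*
 * Index math example for the loop above: in byte 6 (est[0]), bit 7
 * maps to m = 0, i.e. est3_modes[0] (640x350@85), while bit 1 maps to
 * m = 6 (1024x768@85). Bit 0 of each byte is never consumed since j
 * stops at 1.
 */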
 
static void
do_established_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
 
if (data->type == EDID_DETAIL_EST_TIMINGS)
closure->modes += drm_est3_modes(closure->connector, timing);
}
 
/**
* add_established_modes - get est. modes from EDID and add them
* @edid: EDID block to scan
819,7 → 1085,8
* Each EDID block contains a bitmap of the supported "established modes" list
* (defined above). Tease them out and add them to the global modes list.
*/
static int add_established_modes(struct drm_connector *connector, struct edid *edid)
static int
add_established_modes(struct drm_connector *connector, struct edid *edid)
{
struct drm_device *dev = connector->dev;
unsigned long est_bits = edid->established_timings.t1 |
826,8 → 1093,11
(edid->established_timings.t2 << 8) |
((edid->established_timings.mfg_rsvd & 0x80) << 9);
int i, modes = 0;
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
};
 
for (i = 0; i <= EDID_EST_TIMINGS; i++)
for (i = 0; i <= EDID_EST_TIMINGS; i++) {
if (est_bits & (1<<i)) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
836,48 → 1106,61
modes++;
}
}
}
 
return modes;
if (version_greater(edid, 1, 0))
drm_for_each_detailed_block((u8 *)edid,
do_established_modes, &closure);
 
return modes + closure.modes;
}
/**
* standard_timing_level - get std. timing level (CVT/GTF/DMT)
* @edid: EDID block to scan
*/
static int standard_timing_level(struct edid *edid)
 
static void
do_standard_modes(struct detailed_timing *timing, void *c)
{
if (edid->revision >= 2) {
if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
return LEVEL_CVT;
return LEVEL_GTF;
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_connector *connector = closure->connector;
struct edid *edid = closure->edid;
 
if (data->type == EDID_DETAIL_STD_MODES) {
int i;
for (i = 0; i < 6; i++) {
struct std_timing *std;
struct drm_display_mode *newmode;
 
std = &data->data.timings[i];
newmode = drm_mode_std(connector, edid, std,
edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
closure->modes++;
}
return LEVEL_DMT;
}
}
}
 
/**
* add_standard_modes - get std. modes from EDID and add them
* @edid: EDID block to scan
*
* Standard modes can be calculated using the CVT standard. Grab them from
* @edid, calculate them, and add them to the list.
* Standard modes can be calculated using the appropriate standard (DMT,
* GTF or CVT). Grab them from @edid and add them to the list.
*/
static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
static int
add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
struct drm_device *dev = connector->dev;
int i, modes = 0;
int timing_level;
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
};
 
timing_level = standard_timing_level(edid);
 
for (i = 0; i < EDID_STD_TIMINGS; i++) {
struct std_timing *t = &edid->standard_timings[i];
struct drm_display_mode *newmode;
 
/* If the std timing bytes are 1, 1 the entry is unused */
if (t->hsize == 1 && t->vfreq_aspect == 1)
continue;
 
newmode = drm_mode_std(dev, &edid->standard_timings[i],
edid->revision, timing_level);
newmode = drm_mode_std(connector, edid,
&edid->standard_timings[i],
edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
884,67 → 1167,15
}
}
 
return modes;
}
if (version_greater(edid, 1, 0))
drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
&closure);
 
/*
* XXX fix this for:
* - GTF secondary curve formula
* - EDID 1.4 range offsets
* - CVT extended bits
*/
static bool
mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
{
struct detailed_data_monitor_range *range;
int hsync, vrefresh;
/* XXX should also look for standard codes in VTB blocks */
 
range = &timing->data.other_data.data.range;
 
hsync = drm_mode_hsync(mode);
vrefresh = drm_mode_vrefresh(mode);
 
if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
return false;
 
if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
return false;
 
if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
/* be forgiving since it's in units of 10MHz */
int max_clock = range->pixel_clock_mhz * 10 + 9;
max_clock *= 1000;
if (mode->clock > max_clock)
return false;
return modes + closure.modes;
}
 
return true;
}
 
/*
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
static int drm_gtf_modes_for_range(struct drm_connector *connector,
struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
 
for (i = 0; i < drm_num_dmt_modes; i++) {
if (mode_in_range(drm_dmt_modes + i, timing)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
}
 
return modes;
}
 
static int drm_cvt_modes(struct drm_connector *connector,
struct detailed_timing *timing)
{
994,281 → 1225,111
return modes;
}
 
static int add_detailed_modes(struct drm_connector *connector,
struct detailed_timing *timing,
struct edid *edid, u32 quirks, int preferred)
static void
do_cvt_mode(struct detailed_timing *timing, void *c)
{
int i, modes = 0;
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
int timing_level = standard_timing_level(edid);
int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
 
if (timing->pixel_clock) {
newmode = drm_mode_detailed(dev, edid, timing, quirks);
if (!newmode)
return 0;
if (data->type == EDID_DETAIL_CVT_3BYTE)
closure->modes += drm_cvt_modes(closure->connector, timing);
}
 
if (preferred)
newmode->type |= DRM_MODE_TYPE_PREFERRED;
static int
add_cvt_modes(struct drm_connector *connector, struct edid *edid)
{
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
};
 
drm_mode_probed_add(connector, newmode);
return 1;
}
if (version_greater(edid, 1, 2))
drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
 
/* other timing types */
switch (data->type) {
case EDID_DETAIL_MONITOR_RANGE:
if (gtf)
modes += drm_gtf_modes_for_range(connector, timing);
break;
case EDID_DETAIL_STD_MODES:
/* Six modes per detailed section */
for (i = 0; i < 6; i++) {
struct std_timing *std;
struct drm_display_mode *newmode;
/* XXX should also look for CVT codes in VTB blocks */
 
std = &data->data.timings[i];
newmode = drm_mode_std(dev, std, edid->revision,
timing_level);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
return closure.modes;
}
}
break;
case EDID_DETAIL_CVT_3BYTE:
modes += drm_cvt_modes(connector, timing);
break;
default:
break;
}
 
return modes;
}
 
/**
* add_detailed_info - get detailed mode info from EDID data
* @connector: attached connector
* @edid: EDID block to scan
* @quirks: quirks to apply
*
* Some of the detailed timing sections may contain mode information. Grab
* it and add it to the list.
*/
static int add_detailed_info(struct drm_connector *connector,
struct edid *edid, u32 quirks)
static void
do_detailed_mode(struct detailed_timing *timing, void *c)
{
int i, modes = 0;
struct detailed_mode_closure *closure = c;
struct drm_display_mode *newmode;
 
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
if (timing->pixel_clock) {
newmode = drm_mode_detailed(closure->connector->dev,
closure->edid, timing,
closure->quirks);
if (!newmode)
return;
 
/* In 1.0, only timings are allowed */
if (!timing->pixel_clock && edid->version == 1 &&
edid->revision == 0)
continue;
if (closure->preferred)
newmode->type |= DRM_MODE_TYPE_PREFERRED;
 
modes += add_detailed_modes(connector, timing, edid, quirks,
preferred);
drm_mode_probed_add(closure->connector, newmode);
closure->modes++;
closure->preferred = 0;
}
 
return modes;
}
 
/**
* add_detailed_mode_eedid - get detailed mode info from additional timing
* EDID block
/*
* add_detailed_modes - Add modes from detailed timings
* @connector: attached connector
* @edid: EDID block to scan (it is only to get the additional timing EDID block)
* @edid: EDID block to scan
* @quirks: quirks to apply
*
* Some of the detailed timing sections may contain mode information. Grab
* it and add it to the list.
*/
static int add_detailed_info_eedid(struct drm_connector *connector,
struct edid *edid, u32 quirks)
static int
add_detailed_modes(struct drm_connector *connector, struct edid *edid,
u32 quirks)
{
int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
int edid_ext_num;
int start_offset, end_offset;
int timing_level;
struct detailed_mode_closure closure = {
connector,
edid,
1,
quirks,
0
};
 
if (edid->version == 1 && edid->revision < 3) {
/* If the EDID version is less than 1.3, there is no
* extension EDID.
*/
return 0;
}
if (!edid->extensions) {
/* if there is no extension EDID, it is unnecessary to
* parse the E-EDID to get detailed info
*/
return 0;
}
if (closure.preferred && !version_greater(edid, 1, 3))
closure.preferred =
(edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
 
/* Choose the real EDID extension number */
edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
DRM_MAX_EDID_EXT_NUM : edid->extensions;
drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
 
/* Find CEA extension */
for (i = 0; i < edid_ext_num; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
/* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
return closure.modes;
}
 
if (i == edid_ext_num) {
/* if there is no additional timing EDID block, return */
return 0;
}
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VENDOR_BLOCK 0x03
#define EDID_BASIC_AUDIO (1 << 6)
 
/* Get the start offset of detailed timing block */
start_offset = edid_ext[2];
if (start_offset == 0) {
/* If the start_offset is zero, it means that neither detailed
* info nor data block exist. In such case it is also
* unnecessary to parse the detailed timing info.
*/
return 0;
}
 
timing_level = standard_timing_level(edid);
end_offset = EDID_LENGTH;
end_offset -= sizeof(struct detailed_timing);
for (i = start_offset; i < end_offset;
i += sizeof(struct detailed_timing)) {
timing = (struct detailed_timing *)(edid_ext + i);
modes += add_detailed_modes(connector, timing, edid, quirks, 0);
}
 
return modes;
}
 
#define DDC_ADDR 0x50
/**
* Get EDID information via I2C.
*
* \param adapter : i2c device adaptor
* \param buf : EDID data buffer to be filled
* \param len : EDID data buffer length
* \return 0 on success or -1 on failure.
*
* Try to fetch EDID information by calling the I2C driver function.
* Search EDID for CEA extension block.
*/
int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
unsigned char *buf, int len)
u8 *drm_find_cea_extension(struct edid *edid)
{
unsigned char start = 0x0;
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
}
};
 
if (i2c_transfer(adapter, msgs, 2) == 2)
return 0;
 
return -1;
}
EXPORT_SYMBOL(drm_do_probe_ddc_edid);
 
static int drm_ddc_read_edid(struct drm_connector *connector,
struct i2c_adapter *adapter,
char *buf, int len)
{
u8 *edid_ext = NULL;
int i;
 
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, buf, len))
return -1;
if (drm_edid_is_valid((struct edid *)buf))
return 0;
}
/* No EDID or EDID extensions */
if (edid == NULL || edid->extensions == 0)
return NULL;
 
/* repeated checksum failures; warn, but carry on */
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return -1;
/* Find CEA extension */
for (i = 0; i < edid->extensions; i++) {
edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
if (edid_ext[0] == CEA_EXT)
break;
}
 
/**
* drm_get_edid - get EDID data, if available
* @connector: connector we're probing
* @adapter: i2c adapter to use for DDC
*
* Poke the given connector's i2c channel to grab EDID data if possible.
*
* Return edid data or NULL if we couldn't find any.
*/
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
int ret;
struct edid *edid;
if (i == edid->extensions)
return NULL;
 
edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
GFP_KERNEL);
if (edid == NULL) {
dev_warn(&connector->dev->pdev->dev,
"Failed to allocate EDID\n");
goto end;
return edid_ext;
}
EXPORT_SYMBOL(drm_find_cea_extension);
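 
/*
 * A minimal sketch of walking the CEA data block collection this
 * returns; the tag/length header decode mirrors what
 * drm_detect_hdmi_monitor() and drm_detect_monitor_audio() do below.
 * cea_walk_sketch() is illustrative only, not part of this file.
 */
static void cea_walk_sketch(struct edid *edid)
{
	u8 *ext = drm_find_cea_extension(edid);
	int i;

	if (!ext)
		return;

	/* data blocks run from byte 4 up to the DTD offset in byte 2 */
	for (i = 4; i < ext[2]; i += (ext[i] & 0x1f) + 1)
		DRM_DEBUG_KMS("CEA block tag %d, %d payload bytes\n",
			      ext[i] >> 5, ext[i] & 0x1f);
}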
 
/* Read first EDID block */
ret = drm_ddc_read_edid(connector, adapter,
(unsigned char *)edid, EDID_LENGTH);
if (ret != 0)
goto clean_up;
 
/* There are EDID extensions to be read */
if (edid->extensions != 0) {
int edid_ext_num = edid->extensions;
 
if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
dev_warn(&connector->dev->pdev->dev,
"The number of extension(%d) is "
"over max (%d), actually read number (%d)\n",
edid_ext_num, DRM_MAX_EDID_EXT_NUM,
DRM_MAX_EDID_EXT_NUM);
/* Reset EDID extension number to be read */
edid_ext_num = DRM_MAX_EDID_EXT_NUM;
}
/* Read EDID including extensions too */
ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
EDID_LENGTH * (edid_ext_num + 1));
if (ret != 0)
goto clean_up;
 
}
 
connector->display_info.raw_edid = (char *)edid;
goto end;
 
clean_up:
kfree(edid);
edid = NULL;
end:
return edid;
 
}
EXPORT_SYMBOL(drm_get_edid);
 
#define HDMI_IDENTIFIER 0x000C03
#define VENDOR_BLOCK 0x03
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
* @edid: monitor EDID information
1278,30 → 1339,15
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
{
char *edid_ext = NULL;
int i, hdmi_id, edid_ext_num;
u8 *edid_ext;
int i, hdmi_id;
int start_offset, end_offset;
bool is_hdmi = false;
 
/* No EDID or EDID extensions */
if (edid == NULL || edid->extensions == 0)
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
goto end;
 
/* Choose the real EDID extension number */
edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
DRM_MAX_EDID_EXT_NUM : edid->extensions;
 
/* Find CEA extension */
for (i = 0; i < edid_ext_num; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
/* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
 
if (i == edid_ext_num)
goto end;
 
/* Data block offset in CEA extension block */
start_offset = 4;
end_offset = edid_ext[2];
1330,6 → 1376,111
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
 
/**
* drm_detect_monitor_audio - check monitor audio capability
*
* Monitor should have CEA extension block.
* If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
* audio' only. If there is any audio extension block and supported
* audio format, assume at least 'basic audio' support, even if 'basic
* audio' is not defined in EDID.
*
*/
bool drm_detect_monitor_audio(struct edid *edid)
{
u8 *edid_ext;
int i, j;
bool has_audio = false;
int start_offset, end_offset;
 
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
goto end;
 
has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
 
if (has_audio) {
DRM_DEBUG_KMS("Monitor has basic audio support\n");
goto end;
}
 
/* Data block offset in CEA extension block */
start_offset = 4;
end_offset = edid_ext[2];
 
for (i = start_offset; i < end_offset;
i += ((edid_ext[i] & 0x1f) + 1)) {
if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
has_audio = true;
for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
DRM_DEBUG_KMS("CEA audio format %d\n",
(edid_ext[i + j] >> 3) & 0xf);
goto end;
}
}
end:
return has_audio;
}
EXPORT_SYMBOL(drm_detect_monitor_audio);
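 
/*
 * For reference, the 3-byte Short Audio Descriptor layout the loop
 * above indexes into (per CEA-861; sad would point at edid_ext[i + j]):
 *
 *	format   = (sad[0] >> 3) & 0xf;    format code 1 is LPCM
 *	channels = (sad[0] & 0x07) + 1;    max channel count
 *	rates    = sad[1];                 bit 0 = 32kHz, bit 1 = 44.1kHz, ...
 */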
 
/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
*
* Grab any available display info and stuff it into the drm_display_info
* structure that's part of the connector. Useful for tracking bpp and
* color spaces.
*/
static void drm_add_display_info(struct edid *edid,
struct drm_display_info *info)
{
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
 
/* driver figures it out in this case */
info->bpc = 0;
info->color_formats = 0;
 
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
 
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return;
 
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
info->bpc = 6;
break;
case DRM_EDID_DIGITAL_DEPTH_8:
info->bpc = 8;
break;
case DRM_EDID_DIGITAL_DEPTH_10:
info->bpc = 10;
break;
case DRM_EDID_DIGITAL_DEPTH_12:
info->bpc = 12;
break;
case DRM_EDID_DIGITAL_DEPTH_14:
info->bpc = 14;
break;
case DRM_EDID_DIGITAL_DEPTH_16:
info->bpc = 16;
break;
case DRM_EDID_DIGITAL_DEPTH_UNDEF:
default:
info->bpc = 0;
break;
}
 
info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
 
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
* @edid: edid data
1347,7 → 1498,7
return 0;
}
if (!drm_edid_is_valid(edid)) {
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
}
1354,31 → 1505,30
 
quirks = edid_get_quirks(edid);
 
/*
* EDID spec says modes should be preferred in this order:
* - preferred detailed mode
* - other detailed modes from base block
* - detailed modes from extension blocks
* - CVT 3-byte code modes
* - standard timing codes
* - established timing codes
* - modes inferred from GTF or CVT range information
*
* We get this pretty much right.
*
* XXX order for additional mode types in extension blocks?
*/
num_modes += add_detailed_modes(connector, edid, quirks);
num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_detailed_info(connector, edid, quirks);
num_modes += add_detailed_info_eedid(connector, edid, quirks);
num_modes += add_inferred_modes(connector, edid);
 
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
 
connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
connector->display_info.width_mm = edid->width_cm * 10;
connector->display_info.height_mm = edid->height_cm * 10;
connector->display_info.gamma = edid->gamma;
connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
connector->display_info.gamma = edid->gamma;
drm_add_display_info(edid, &connector->display_info);
 
return num_modes;
}
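 
/*
 * Typical usage from a connector's ->get_modes() hook (a sketch;
 * get_modes_sketch() is hypothetical and the DDC adapter lookup is
 * driver-specific):
 */
static int get_modes_sketch(struct drm_connector *connector,
			    struct i2c_adapter *ddc)
{
	struct edid *edid = drm_get_edid(connector, ddc);
	int count = 0;

	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return count;
}
 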
1399,7 → 1549,7
int hdisplay, int vdisplay)
{
int i, count, num_modes = 0;
struct drm_display_mode *mode, *ptr;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
 
count = ARRAY_SIZE(drm_dmt_modes);
1409,7 → 1559,7
vdisplay = 0;
 
for (i = 0; i < count; i++) {
ptr = &drm_dmt_modes[i];
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
* Only when two are valid, they will be used to check
/drivers/video/drm/drm_edid_modes.h
0,0 → 1,380
/*
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
* Copyright 2010 Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/kernel.h>
#include "drmP.h"
#include "drm_edid.h"
 
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
* But the modes with the reduced blanking feature are deleted.
*/
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x400@85Hz */
{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 400, 401, 404, 445, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 720x400@85Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
828, 936, 0, 400, 401, 404, 446, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x480@85Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
752, 832, 0, 480, 481, 484, 509, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@56Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@72Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@85Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@75Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
1488, 1696, 0, 768, 771, 778, 805, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@85Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@75Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
1488, 1696, 0, 800, 803, 809, 838, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@85Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@85Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@75Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@85Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@75Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
1688, 1936, 0, 900, 903, 909, 942, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@85Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@65Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@70Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@75Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@85Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@75Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@85Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@75Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@75Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@85Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@75Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@75Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@85Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
static const int drm_num_dmt_modes = ARRAY_SIZE(drm_dmt_modes);
 
static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
846, 900, 0, 400, 421, 423, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
846, 900, 0, 400, 412, 414, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
928, 1152, 0, 624, 625, 628, 667, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
 
static const struct {
short w;
short h;
short r;
short rb;
} est3_modes[] = {
/* byte 6 */
{ 640, 350, 85, 0 },
{ 640, 400, 85, 0 },
{ 720, 400, 85, 0 },
{ 640, 480, 85, 0 },
{ 848, 480, 60, 0 },
{ 800, 600, 85, 0 },
{ 1024, 768, 85, 0 },
{ 1152, 864, 75, 0 },
/* byte 7 */
{ 1280, 768, 60, 1 },
{ 1280, 768, 60, 0 },
{ 1280, 768, 75, 0 },
{ 1280, 768, 85, 0 },
{ 1280, 960, 60, 0 },
{ 1280, 960, 85, 0 },
{ 1280, 1024, 60, 0 },
{ 1280, 1024, 85, 0 },
/* byte 8 */
{ 1360, 768, 60, 0 },
{ 1440, 900, 60, 1 },
{ 1440, 900, 60, 0 },
{ 1440, 900, 75, 0 },
{ 1440, 900, 85, 0 },
{ 1400, 1050, 60, 1 },
{ 1400, 1050, 60, 0 },
{ 1400, 1050, 75, 0 },
/* byte 9 */
{ 1400, 1050, 85, 0 },
{ 1680, 1050, 60, 1 },
{ 1680, 1050, 60, 0 },
{ 1680, 1050, 75, 0 },
{ 1680, 1050, 85, 0 },
{ 1600, 1200, 60, 0 },
{ 1600, 1200, 65, 0 },
{ 1600, 1200, 70, 0 },
/* byte 10 */
{ 1600, 1200, 75, 0 },
{ 1600, 1200, 85, 0 },
{ 1792, 1344, 60, 0 },
{ 1792, 1344, 85, 0 },
{ 1856, 1392, 60, 0 },
{ 1856, 1392, 75, 0 },
{ 1920, 1200, 60, 1 },
{ 1920, 1200, 60, 0 },
/* byte 11 */
{ 1920, 1200, 75, 0 },
{ 1920, 1200, 85, 0 },
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
static const int num_est3_modes = ARRAY_SIZE(est3_modes);
/drivers/video/drm/drm_fb_helper.c
28,7 → 28,8
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/kernel.h>
//#include <linux/sysrq.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include "drmP.h"
#include "drm_crtc.h"
41,49 → 42,57
 
static LIST_HEAD(kernel_fb_helper_list);
 
int drm_fb_helper_add_connector(struct drm_connector *connector)
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
if (!connector->fb_helper_private)
return -ENOMEM;
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
int i;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_fb_helper_connector *fb_helper_connector;
 
fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
if (!fb_helper_connector)
goto fail;
 
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
kfree(fb_helper->connector_info[i]);
fb_helper->connector_info[i] = NULL;
}
EXPORT_SYMBOL(drm_fb_helper_add_connector);
fb_helper->connector_count = 0;
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
 
bool drm_fb_helper_force_kernel_mode(void)
static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
{
int i = 0;
bool ret, error = false;
struct drm_fb_helper *helper;
uint16_t *r_base, *g_base, *b_base;
int i;
 
if (list_empty(&kernel_fb_helper_list))
return false;
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
 
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
for (i = 0; i < helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
ret = drm_crtc_helper_set_config(mode_set);
if (ret)
error = true;
for (i = 0; i < crtc->gamma_size; i++)
helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
}
}
return error;
}
 
/**
* drm_fb_helper_restore - restore the framebuffer console (kernel) config
*
* Restore's the kernel's fbcon mode, used for lastclose & panic paths.
*/
void drm_fb_helper_restore(void)
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
bool ret;
ret = drm_fb_helper_force_kernel_mode();
if (ret == true)
DRM_ERROR("Failed to restore crtc configuration\n");
uint16_t *r_base, *g_base, *b_base;
 
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
 
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
}
EXPORT_SYMBOL(drm_fb_helper_restore);
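 
/*
 * Layout assumed by the two LUT helpers above: crtc->gamma_store is a
 * single allocation holding three gamma_size-long arrays of u16, red
 * first, then green, then blue, which is why g_base and b_base are
 * derived by simple pointer offsets.
 */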
 
 
static void drm_fb_helper_on(struct fb_info *info)
91,27 → 100,33
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
int i;
int i, j;
 
/*
* For each CRTC in this fb, turn the crtc on then,
* find all associated encoders and turn them on.
*/
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
 
/* Only mess with CRTCs in this fb */
if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
!crtc->enabled)
if (!crtc->enabled)
continue;
 
mutex_lock(&dev->mode_config.mutex);
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
mutex_unlock(&dev->mode_config.mutex);
 
/* Walk the connectors & encoders on this fb turning them on */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->dpms = DRM_MODE_DPMS_ON;
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property,
DRM_MODE_DPMS_ON);
}
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
118,14 → 133,12
struct drm_encoder_helper_funcs *encoder_funcs;
 
encoder_funcs = encoder->helper_private;
mutex_lock(&dev->mode_config.mutex);
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
mutex_unlock(&dev->mode_config.mutex);
}
}
}
mutex_unlock(&dev->mode_config.mutex);
}
}
 
static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
{
132,23 → 145,31
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
int i;
int i, j;
 
/*
* For each CRTC in this fb, find all associated encoders
* and turn them off, then turn off the CRTC.
*/
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
 
/* Only mess with CRTCs in this fb */
if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
!crtc->enabled)
if (!crtc->enabled)
continue;
 
/* Walk the connectors on this fb and mark them off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->dpms = dpms_mode;
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property,
dpms_mode);
}
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
155,17 → 176,13
struct drm_encoder_helper_funcs *encoder_funcs;
 
encoder_funcs = encoder->helper_private;
mutex_lock(&dev->mode_config.mutex);
encoder_funcs->dpms(encoder, dpms_mode);
mutex_unlock(&dev->mode_config.mutex);
}
}
mutex_lock(&dev->mode_config.mutex);
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
mutex_unlock(&dev->mode_config.mutex);
}
}
}
 
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
199,50 → 216,64
{
int i;
 
for (i = 0; i < helper->connector_count; i++)
kfree(helper->connector_info[i]);
kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++)
kfree(helper->crtc_info[i].mode_set.connectors);
kfree(helper->crtc_info);
}
 
int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
int crtc_count, int max_conn_count)
{
struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
int ret = 0;
int i;
 
helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
if (!helper->crtc_info)
fb_helper->dev = dev;
 
INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
 
fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
if (!fb_helper->crtc_info)
return -ENOMEM;
 
helper->crtc_count = crtc_count;
fb_helper->crtc_count = crtc_count;
fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
if (!fb_helper->connector_info) {
kfree(fb_helper->crtc_info);
return -ENOMEM;
}
fb_helper->connector_count = 0;
 
for (i = 0; i < crtc_count; i++) {
helper->crtc_info[i].mode_set.connectors =
fb_helper->crtc_info[i].mode_set.connectors =
kcalloc(max_conn_count,
sizeof(struct drm_connector *),
GFP_KERNEL);
 
if (!helper->crtc_info[i].mode_set.connectors) {
if (!fb_helper->crtc_info[i].mode_set.connectors) {
ret = -ENOMEM;
goto out_free;
}
helper->crtc_info[i].mode_set.num_connectors = 0;
fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
 
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
helper->crtc_info[i].crtc_id = crtc->base.id;
helper->crtc_info[i].mode_set.crtc = crtc;
fb_helper->crtc_info[i].crtc_id = crtc->base.id;
fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
helper->conn_limit = max_conn_count;
fb_helper->conn_limit = max_conn_count;
return 0;
out_free:
drm_fb_helper_crtc_free(helper);
drm_fb_helper_crtc_free(fb_helper);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
EXPORT_SYMBOL(drm_fb_helper_init);
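 
/*
 * Sketch of the setup order a driver's fbdev code would follow with
 * the reworked helpers (error handling elided; the CRTC count and
 * per-CRTC connector limit are example values):
 *
 *	drm_fb_helper_init(dev, &fb_helper, 2, 4);
 *	drm_fb_helper_single_add_all_connectors(&fb_helper);
 */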
 
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
264,6 → 295,11
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
if (info->var.transp.length > 0) {
u32 mask = (1 << info->var.transp.length) - 1;
mask <<= info->var.transp.offset;
value |= mask;
}
palette[regno] = value;
return 0;
}
306,20 → 342,15
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, rc = 0;
int i, j, rc = 0;
int start;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
for (i = 0; i < fb_helper->crtc_count; i++) {
if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
break;
}
if (i == fb_helper->crtc_count)
continue;
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
 
red = cmap->red;
green = cmap->green;
327,7 → 358,7
transp = cmap->transp;
start = cmap->start;
 
for (i = 0; i < cmap->len; i++) {
for (j = 0; j < cmap->len; j++) {
u16 hred, hgreen, hblue, htransp = 0xffff;
 
hred = *red++;
347,41 → 378,6
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
 
int drm_fb_helper_setcolreg(unsigned regno,
unsigned red,
unsigned green,
unsigned blue,
unsigned transp,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
int i;
int ret;
 
if (regno > 255)
return 1;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
for (i = 0; i < fb_helper->crtc_count; i++) {
if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
break;
}
if (i == fb_helper->crtc_count)
continue;
 
ret = setcolreg(crtc, red, green, blue, regno, info);
if (ret)
return ret;
 
crtc_funcs->load_lut(crtc);
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_setcolreg);
 
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
389,7 → 385,7
struct drm_framebuffer *fb = fb_helper->fb;
int depth;
 
if (var->pixclock != 0)
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
 
/* Need to resize the fb object !!! */
485,23 → 481,21
return -EINVAL;
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
break;
}
if (i == fb_helper->crtc_count)
continue;
 
if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
mutex_lock(&dev->mode_config.mutex);
crtc = fb_helper->crtc_info[i].mode_set.crtc;
ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
if (ret) {
mutex_unlock(&dev->mode_config.mutex);
if (ret)
return ret;
}
}
mutex_unlock(&dev->mode_config.mutex);
 
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
// drm_fb_helper_hotplug_event(fb_helper);
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
516,15 → 510,10
int ret = 0;
int i;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
break;
}
crtc = fb_helper->crtc_info[i].mode_set.crtc;
 
if (i == fb_helper->crtc_count)
continue;
 
modeset = &fb_helper->crtc_info[i].mode_set;
 
modeset->x = var->xoffset;
531,9 → 520,7
modeset->y = var->yoffset;
 
if (modeset->num_connectors) {
mutex_lock(&dev->mode_config.mutex);
ret = crtc->funcs->set_config(modeset);
mutex_unlock(&dev->mode_config.mutex);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
540,180 → 527,98
}
}
}
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
 
int drm_fb_helper_single_fb_probe(struct drm_device *dev,
int preferred_bpp,
int (*fb_create)(struct drm_device *dev,
uint32_t fb_width,
uint32_t fb_height,
uint32_t surface_width,
uint32_t surface_height,
uint32_t surface_depth,
uint32_t surface_bpp,
struct drm_framebuffer **fb_ptr))
int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int preferred_bpp)
{
struct drm_crtc *crtc;
struct drm_connector *connector;
unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
unsigned int surface_width = 0, surface_height = 0;
int new_fb = 0;
int crtc_count = 0;
int ret, i, conn_count = 0;
int i;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_set *modeset = NULL;
struct drm_fb_helper *fb_helper;
uint32_t surface_depth = 24, surface_bpp = 32;
struct drm_fb_helper_surface_size sizes;
int gamma_size = 0;
 
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
if (preferred_bpp != surface_bpp) {
surface_depth = surface_bpp = preferred_bpp;
}
/* first up get a count of crtcs now in use and new min/maxes width/heights */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
sizes.surface_depth = 24;
sizes.surface_bpp = 32;
sizes.fb_width = (unsigned)-1;
sizes.fb_height = (unsigned)-1;
 
struct drm_fb_helper_cmdline_mode *cmdline_mode;
sizes.surface_depth = 24;
sizes.surface_bpp = 32;
 
if (!fb_help_conn)
continue;
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
desired_mode = fb_helper->crtc_info[i].desired_mode;
cmdline_mode = &fb_help_conn->cmdline_mode;
 
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
surface_depth = surface_bpp = 8;
break;
case 15:
surface_depth = 15;
surface_bpp = 16;
break;
case 16:
surface_depth = surface_bpp = 16;
break;
case 24:
surface_depth = surface_bpp = 24;
break;
case 32:
surface_depth = 24;
surface_bpp = 32;
break;
}
break;
}
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (drm_helper_crtc_in_use(crtc)) {
if (crtc->desired_mode) {
if (crtc->desired_mode->hdisplay < fb_width)
fb_width = crtc->desired_mode->hdisplay;
 
if (crtc->desired_mode->vdisplay < fb_height)
fb_height = crtc->desired_mode->vdisplay;
 
if (crtc->desired_mode->hdisplay > surface_width)
surface_width = crtc->desired_mode->hdisplay;
 
if (crtc->desired_mode->vdisplay > surface_height)
surface_height = crtc->desired_mode->vdisplay;
}
if (desired_mode) {
if (gamma_size == 0)
gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
if (desired_mode->hdisplay < sizes.fb_width)
sizes.fb_width = desired_mode->hdisplay;
if (desired_mode->vdisplay < sizes.fb_height)
sizes.fb_height = desired_mode->vdisplay;
if (desired_mode->hdisplay > sizes.surface_width)
sizes.surface_width = desired_mode->hdisplay;
if (desired_mode->vdisplay > sizes.surface_height)
sizes.surface_height = desired_mode->vdisplay;
crtc_count++;
}
}
 
if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
/* hmm everyone went away - assume VGA cable just fell out
and will come back later. */
return 0;
DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
sizes.fb_width = sizes.surface_width = 1024;
sizes.fb_height = sizes.surface_height = 768;
}
 
/* do we have an fb already? */
if (list_empty(&dev->mode_config.fb_kernel_list)) {
ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
surface_height, surface_depth, surface_bpp,
&fb);
if (ret)
return -EINVAL;
new_fb = 1;
} else {
fb = list_first_entry(&dev->mode_config.fb_kernel_list,
struct drm_framebuffer, filp_head);
/* push down into drivers */
 
/* if someone hotplugs something bigger than we have already allocated, we are pwned.
As really we can't resize an fbdev that is in the wild currently due to fbdev
not really being designed for the lower layers moving stuff around under it.
- so in the grand style of things - punt. */
if ((fb->width < surface_width) ||
(fb->height < surface_height)) {
DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
return -EINVAL;
}
}
DRM_INFO("enter fb probe\n");
 
info = fb->fbdev;
fb_helper = info->par;
 
crtc_count = 0;
/* okay we need to setup new connector sets in the crtcs */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
modeset = &fb_helper->crtc_info[crtc_count].mode_set;
modeset->fb = fb;
conn_count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder)
if (connector->encoder->crtc == modeset->crtc) {
modeset->connectors[conn_count] = connector;
conn_count++;
if (conn_count > fb_helper->conn_limit)
BUG();
}
}
new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
if (new_fb < 0)
return new_fb;
 
for (i = conn_count; i < fb_helper->conn_limit; i++)
modeset->connectors[i] = NULL;
info = fb_helper->fbdev;
 
modeset->crtc = crtc;
crtc_count++;
#if 0
 
modeset->num_connectors = conn_count;
if (modeset->crtc->desired_mode) {
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
modeset->crtc->desired_mode);
/* set the fb pointer */
for (i = 0; i < fb_helper->crtc_count; i++) {
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
}
}
fb_helper->crtc_count = crtc_count;
fb_helper->fb = fb;
 
if (new_fb) {
info->var.pixclock = 0;
 
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
info->fix.id);
 
} else {
drm_fb_helper_set_par(info);
}
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
info->fix.id);
 
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
 
if (new_fb)
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
 
#endif
 
LEAVE();
 
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
 
void drm_fb_helper_free(struct drm_fb_helper *helper)
{
list_del(&helper->kernel_fb_list);
drm_fb_helper_crtc_free(helper);
}
EXPORT_SYMBOL(drm_fb_helper_free);
 
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
720,6 → 625,8
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_TRUECOLOR;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
732,13 → 639,15
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
 
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
info->pseudo_palette = fb->pseudo_palette;
struct drm_framebuffer *fb = fb_helper->fb;
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
803,3 → 712,313
info->var.yres = fb_height;
}
EXPORT_SYMBOL(drm_fb_helper_fill_var);
 
static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
uint32_t maxX,
uint32_t maxY)
{
struct drm_connector *connector;
int count = 0;
int i;
 
for (i = 0; i < fb_helper->connector_count; i++) {
connector = fb_helper->connector_info[i]->connector;
count += connector->funcs->fill_modes(connector, maxX, maxY);
}
 
return count;
}
 
static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
{
struct drm_display_mode *mode;
 
list_for_each_entry(mode, &fb_connector->connector->modes, head) {
if (drm_mode_width(mode) > width ||
drm_mode_height(mode) > height)
continue;
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return mode;
}
return NULL;
}
 
static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
{
struct drm_cmdline_mode *cmdline_mode;
cmdline_mode = &fb_connector->cmdline_mode;
return cmdline_mode->specified;
}
 
 
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
 
if (strict) {
enable = connector->status == connector_status_connected;
} else {
enable = connector->status != connector_status_disconnected;
}
return enable;
}
 
static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
bool *enabled)
{
bool any_enabled = false;
struct drm_connector *connector;
int i = 0;
 
for (i = 0; i < fb_helper->connector_count; i++) {
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
enabled[i] ? "yes" : "no");
any_enabled |= enabled[i];
}
 
if (any_enabled)
return;
 
for (i = 0; i < fb_helper->connector_count; i++) {
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, false);
}
}
 
 
static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
bool *enabled, int width, int height)
{
struct drm_fb_helper_connector *fb_helper_conn;
int i;
 
for (i = 0; i < fb_helper->connector_count; i++) {
fb_helper_conn = fb_helper->connector_info[i];
 
if (enabled[i] == false)
continue;
 
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
fb_helper_conn->connector->base.id);
 
/* go for the command line mode first */
// modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
 
modes[i] = NULL;
 
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
fb_helper_conn->connector->base.id);
modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
}
/* No preferred modes, pick one off the list */
if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
break;
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
}
return true;
}
 
static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **best_crtcs,
struct drm_display_mode **modes,
int n, int width, int height)
{
int c, o;
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_fb_helper_crtc *best_crtc;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
 
if (n == fb_helper->connector_count)
return 0;
 
fb_helper_conn = fb_helper->connector_info[n];
connector = fb_helper_conn->connector;
 
best_crtcs[n] = NULL;
best_crtc = NULL;
best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
if (modes[n] == NULL)
return best_score;
 
crtcs = kzalloc(dev->mode_config.num_connector *
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
 
my_score = 1;
if (connector->status == connector_status_connected)
my_score++;
if (drm_has_cmdline_mode(fb_helper_conn))
my_score++;
if (drm_has_preferred_mode(fb_helper_conn, width, height))
my_score++;
 
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if (!encoder)
goto out;
 
/* select a crtc for this connector and then attempt to configure
remaining connectors */
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
 
if ((encoder->possible_crtcs & (1 << c)) == 0) {
continue;
}
 
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
break;
 
if (o < n) {
/* ignore cloning unless only a single crtc */
if (fb_helper->crtc_count > 1)
continue;
 
if (!drm_mode_equal(modes[o], modes[n]))
continue;
}
 
crtcs[n] = crtc;
memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
width, height);
if (score > best_score) {
best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
sizeof(struct drm_fb_helper_crtc *));
}
}
out:
kfree(crtcs);
return best_score;
}
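/*
 * A worked example of the scoring above (a hypothetical setup, not code
 * from this file): with a connected DVI panel that has a preferred mode
 * (my_score = 1 + 1 + 1 = 3) and a VGA port that is merely not reported
 * disconnected (my_score = 1), the recursion tries each CRTC for
 * connector n plus the best arrangement for connectors n+1..N and keeps
 * the combination with the highest summed score, so the DVI panel is
 * guaranteed a CRTC before the VGA port gets one.
 */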
 
static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_encoder *encoder;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
int i, ret;
 
DRM_DEBUG_KMS("\n");
 
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
 
/* clean out all the encoder/crtc combos */
// list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
// encoder->crtc = NULL;
// }
 
crtcs = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
 
drm_enable_connectors(fb_helper, enabled);
 
//ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
 
ret = 0;
 
if (!ret) {
ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
if (!ret)
DRM_ERROR("Unable to find initial modes\n");
}
 
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
 
drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
 
/* need to set the modesets up here for use later */
/* fill out the connector<->crtc mappings into the modesets */
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->num_connectors = 0;
}
 
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
modeset = &fb_crtc->mode_set;
 
if (mode && fb_crtc) {
DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
mode->name, fb_crtc->mode_set.crtc->base.id);
fb_crtc->desired_mode = mode;
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
}
}
 
kfree(crtcs);
kfree(modes);
kfree(enabled);
}
 
/**
* drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
* @bpp_sel: bpp value to use for the framebuffer configuration
*
* LOCKING:
* Called at init time, must take mode config lock.
*
* Scan the CRTCs and connectors and try to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
struct drm_device *dev = fb_helper->dev;
int count = 0;
 
/* disable all the possible outputs/crtcs before entering KMS mode */
// drm_helper_disable_unused_functions(fb_helper->dev);
 
// drm_fb_helper_parse_command_line(fb_helper);
 
count = drm_fb_helper_probe_connector_modes(fb_helper,
dev->mode_config.max_width,
dev->mode_config.max_height);
/*
* we shouldn't end up with no modes here.
*/
if (count == 0) {
printk(KERN_INFO "No connectors reported connected with modes\n");
}
drm_setup_crtcs(fb_helper);
 
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
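/*
 * A minimal driver-side sketch of how the helper above is meant to be
 * driven at init time; "my_fbdev_init" and the 32 bpp default are
 * assumptions, not code from this file.
 */
static void my_fbdev_init(struct drm_fb_helper *helper)
{
	/* 32 bpp is a sensible default; drivers may pass 8 or 16 instead. */
	drm_fb_helper_initial_config(helper, 32);
}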
/drivers/video/drm/drm_irq.c
0,0 → 1,136
/**
* \file drm_irq.c
* IRQ support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
 
/*
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include "drmP.h"
#include <asm/div64.h>
//#include "drm_trace.h"
 
//#include <linux/interrupt.h> /* For task queue support */
#include <linux/slab.h>
 
//#include <linux/vgaarb.h>
 
/* Access macro for slots in vblank timestamp ringbuffer. */
#define vblanktimestamp(dev, crtc, count) ( \
(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
((count) % DRM_VBLANKTIME_RBSIZE)])
 
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
*/
#define DRM_TIMESTAMP_MAXRETRIES 3
 
/* Threshold in nanoseconds for detection of redundant
* vblank irq in drm_handle_vblank(). 1 msec should be ok.
*/
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
 
static inline u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
}
 
 
u64 div64_u64(u64 dividend, u64 divisor)
{
u32 high, d;
 
high = divisor >> 32;
if (high) {
unsigned int shift = fls(high);
 
d = divisor >> shift;
dividend >>= shift;
} else
d = divisor;
 
return div_u64(dividend, d);
}
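/*
 * Note: when the divisor does not fit in 32 bits, div64_u64() above
 * shifts both operands right before the 32-bit division, dropping low
 * divisor bits, so the quotient is an approximation - close enough for
 * the nanosecond arithmetic below (e.g. 2^63 / 6442450944 comes out as
 * 1431655765 either way).
 */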
 
 
/**
 * drm_calc_timestamping_constants - Calculate and
 * store various constants which are later needed by
 * vblank and swap-completion timestamping, e.g. by
 * drm_calc_vbltimestamp_from_scanoutpos().
 * They are derived from the crtc's true scanout timing,
 * so they take things like panel scaling or other
 * adjustments into account.
 *
 * @crtc: drm_crtc whose timestamp constants should be updated.
 *
 */
void drm_calc_timestamping_constants(struct drm_crtc *crtc)
{
s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
u64 dotclock;
 
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
 
/* Fields of interlaced scanout modes are only half a frame duration.
 * Double the dotclock to get half the frame-/line-/pixel duration.
 */
if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
dotclock *= 2;
 
/* Valid dotclock? */
if (dotclock > 0) {
/* Convert scanline length in pixels and video dot clock to
* line duration, frame duration and pixel duration in
* nanoseconds:
*/
pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
1000000000), dotclock);
framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
 
crtc->pixeldur_ns = pixeldur_ns;
crtc->linedur_ns = linedur_ns;
crtc->framedur_ns = framedur_ns;
 
DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
crtc->base.id, crtc->hwmode.crtc_htotal,
crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
(int) linedur_ns, (int) pixeldur_ns);
}
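/*
 * A quick user-space sanity check of the arithmetic above, assuming a
 * standard 1920x1080@60 mode (148.5 MHz dot clock, htotal 2200,
 * vtotal 1125); a sketch, not part of the driver.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dotclock = 148500000ULL;	/* 148.5 MHz */
	uint64_t htotal = 2200, vtotal = 1125;

	uint64_t pixeldur_ns = 1000000000ULL / dotclock;	  /* 6 ns */
	uint64_t linedur_ns = htotal * 1000000000ULL / dotclock;  /* 14814 ns */
	uint64_t framedur_ns = vtotal * linedur_ns;		  /* ~16.67 ms */

	/* 1e9 / 16665750 == 60: the frame duration matches 60 Hz. */
	printf("pixel %llu ns, line %llu ns, frame %llu ns, ~%llu Hz\n",
	       (unsigned long long)pixeldur_ns,
	       (unsigned long long)linedur_ns,
	       (unsigned long long)framedur_ns,
	       1000000000ULL / framedur_ns);
	return 0;
}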
 
/drivers/video/drm/drm_mm.c
43,46 → 43,19
 
#include "drmP.h"
#include "drm_mm.h"
//#include <linux/slab.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
 
#define MM_UNUSED_TARGET 4
 
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
 
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return 0;
 
return entry->size;
}
 
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
 
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return -ENOMEM;
 
if (entry->size <= size)
return -ENOMEM;
 
entry->size -= size;
return 0;
}
 
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
struct drm_mm_node *child;
 
child = kzalloc(sizeof(*child), 0);
if (atomic)
child = kzalloc(sizeof(*child), GFP_ATOMIC);
else
child = kzalloc(sizeof(*child), GFP_KERNEL);
 
if (unlikely(child == NULL)) {
spin_lock(&mm->unused_lock);
91,8 → 64,8
else {
child =
list_entry(mm->unused_nodes.next,
struct drm_mm_node, fl_entry);
list_del(&child->fl_entry);
struct drm_mm_node, node_list);
list_del(&child->node_list);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
112,7 → 85,7
spin_lock(&mm->unused_lock);
while (mm->num_unused < MM_UNUSED_TARGET) {
spin_unlock(&mm->unused_lock);
node = kmalloc(sizeof(*node), GFP_KERNEL);
node = kzalloc(sizeof(*node), GFP_KERNEL);
spin_lock(&mm->unused_lock);
 
if (unlikely(node == NULL)) {
121,7 → 94,7
return ret;
}
++mm->num_unused;
list_add_tail(&node->fl_entry, &mm->unused_nodes);
list_add_tail(&node->node_list, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
128,102 → 101,140
}
EXPORT_SYMBOL(drm_mm_pre_get);
 
static int drm_mm_create_tail_node(struct drm_mm *mm,
unsigned long start,
unsigned long size, int atomic)
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
struct drm_mm_node *child;
return hole_node->start + hole_node->size;
}
 
child = drm_mm_kmalloc(mm, atomic);
if (unlikely(child == NULL))
return -ENOMEM;
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
struct drm_mm_node *next_node =
list_entry(hole_node->node_list.next, struct drm_mm_node,
node_list);
 
child->free = 1;
child->size = size;
child->start = start;
child->mm = mm;
 
list_add_tail(&child->ml_entry, &mm->ml_entry);
list_add_tail(&child->fl_entry, &mm->fl_entry);
 
return 0;
return next_node->start;
}
 
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free) {
return drm_mm_create_tail_node(mm, entry->start + entry->size,
size, atomic);
BUG_ON(!hole_node->hole_follows || node->allocated);
 
if (alignment)
tmp = hole_start % alignment;
 
if (!tmp) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
} else
wasted = alignment - tmp;
 
node->start = hole_start + wasted;
node->size = size;
node->mm = mm;
node->allocated = 1;
 
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
 
BUG_ON(node->start + node->size > hole_end);
 
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
entry->size += size;
return 0;
}
 
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
int atomic)
{
struct drm_mm_node *child;
struct drm_mm_node *node;
 
child = drm_mm_kmalloc(parent->mm, atomic);
if (unlikely(child == NULL))
node = drm_mm_kmalloc(hole_node->mm, atomic);
if (unlikely(node == NULL))
return NULL;
 
INIT_LIST_HEAD(&child->fl_entry);
drm_mm_insert_helper(hole_node, node, size, alignment);
 
child->free = 0;
child->size = size;
child->start = parent->start;
child->mm = parent->mm;
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
 
list_add_tail(&child->ml_entry, &parent->ml_entry);
INIT_LIST_HEAD(&child->fl_entry);
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. The preallocated memory node
* must be cleared.
*/
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment)
{
struct drm_mm_node *hole_node;
 
parent->size -= size;
parent->start += size;
return child;
hole_node = drm_mm_search_free(mm, size, alignment, 0);
if (!hole_node)
return -ENOSPC;
 
drm_mm_insert_helper(hole_node, node, size, alignment);
 
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
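/*
 * A minimal usage sketch of the embedded-node API above; the "my_obj"
 * structure and the sizes are assumptions, not part of this file.
 */
struct my_obj {
	struct drm_mm_node node;	/* embedded; must be zeroed before insert */
};

static int my_obj_bind(struct drm_mm *mm, struct my_obj *obj)
{
	/* Find a 4 KiB hole at 4 KiB alignment and bind the node to it;
	 * -ENOSPC means the caller has to evict something and retry. */
	return drm_mm_insert_node(mm, &obj->node, 4096, 4096);
}

static void my_obj_unbind(struct my_obj *obj)
{
	/* Unlinks the node; no kfree() here - the node is embedded. */
	drm_mm_remove_node(&obj->node);
}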
 
 
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
int atomic)
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long start, unsigned long end)
{
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
struct drm_mm_node *align_splitoff = NULL;
unsigned tmp = 0;
BUG_ON(!hole_node->hole_follows || node->allocated);
 
if (hole_start < start)
wasted += start - hole_start;
if (alignment)
tmp = node->start % alignment;
tmp = (hole_start + wasted) % alignment;
 
if (tmp) {
align_splitoff =
drm_mm_split_at_start(node, alignment - tmp, atomic);
if (unlikely(align_splitoff == NULL))
return NULL;
}
if (tmp)
wasted += alignment - tmp;
 
if (node->size == size) {
list_del_init(&node->fl_entry);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
if (!wasted) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
}
 
if (align_splitoff)
drm_mm_put_block(align_splitoff);
node->start = hole_start + wasted;
node->size = size;
node->mm = mm;
node->allocated = 1;
 
return node;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
 
BUG_ON(node->start + node->size > hole_end);
BUG_ON(node->start + node->size > end);
 
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
}
 
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long start,
230,137 → 241,146
unsigned long end,
int atomic)
{
struct drm_mm_node *align_splitoff = NULL;
unsigned tmp = 0;
unsigned wasted = 0;
struct drm_mm_node *node;
 
if (node->start < start)
wasted += start - node->start;
if (alignment)
tmp = ((node->start + wasted) % alignment);
node = drm_mm_kmalloc(hole_node->mm, atomic);
if (unlikely(node == NULL))
return NULL;
 
if (tmp)
wasted += alignment - tmp;
if (wasted) {
align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
if (unlikely(align_splitoff == NULL))
return NULL;
drm_mm_insert_helper_range(hole_node, node, size, alignment,
start, end);
 
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
 
if (node->size == size) {
list_del_init(&node->fl_entry);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. This is for range
* restricted allocations. The preallocated memory node must be cleared.
*/
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long start, unsigned long end)
{
struct drm_mm_node *hole_node;
 
hole_node = drm_mm_search_free_in_range(mm, size, alignment,
start, end, 0);
if (!hole_node)
return -ENOSPC;
 
drm_mm_insert_helper_range(hole_node, node, size, alignment,
start, end);
 
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
if (align_splitoff)
drm_mm_put_block(align_splitoff);
/**
* Remove a memory node from the allocator.
*/
void drm_mm_remove_node(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
 
return node;
BUG_ON(node->scanned_block || node->scanned_prev_free
|| node->scanned_next_free);
 
prev_node =
list_entry(node->node_list.prev, struct drm_mm_node, node_list);
 
if (node->hole_follows) {
BUG_ON(drm_mm_hole_node_start(node)
== drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
} else
BUG_ON(drm_mm_hole_node_start(node)
!= drm_mm_hole_node_end(node));
 
if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
list_add(&prev_node->hole_stack, &mm->hole_stack);
} else
list_move(&prev_node->hole_stack, &mm->hole_stack);
 
list_del(&node->node_list);
node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
EXPORT_SYMBOL(drm_mm_remove_node);
 
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
* Remove a memory node from the allocator and free the allocated struct
* drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
* drm_mm_get_block functions.
*/
 
void drm_mm_put_block(struct drm_mm_node *cur)
void drm_mm_put_block(struct drm_mm_node *node)
{
 
struct drm_mm *mm = cur->mm;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &mm->ml_entry;
struct drm_mm_node *prev_node = NULL;
struct drm_mm_node *next_node;
struct drm_mm *mm = node->mm;
 
int merged = 0;
drm_mm_remove_node(node);
 
if (cur_head->prev != root_head) {
prev_node =
list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
next_node =
list_entry(cur_head->next, struct drm_mm_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&next_node->fl_entry,
&mm->unused_nodes);
list_add(&node->node_list, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(next_node);
kfree(node);
spin_unlock(&mm->unused_lock);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
merged = 1;
}
EXPORT_SYMBOL(drm_mm_put_block);
 
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
unsigned wasted = 0;
 
if (end - start < size)
return 0;
 
if (alignment) {
unsigned tmp = start % alignment;
if (tmp)
wasted = alignment - tmp;
}
 
if (end >= start + size + wasted) {
return 1;
}
if (!merged) {
cur->free = 1;
list_add(&cur->fl_entry, &mm->fl_entry);
} else {
list_del(&cur->ml_entry);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&cur->fl_entry, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(cur);
spin_unlock(&mm->unused_lock);
 
return 0;
}
}
 
EXPORT_SYMBOL(drm_mm_put_block);
 
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
unsigned wasted;
 
BUG_ON(mm->scanned_blocks);
 
best = NULL;
best_size = ~0UL;
 
list_for_each(list, free_stack) {
entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
 
if (entry->size < size)
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
BUG_ON(!entry->hole_follows);
if (!check_free_hole(drm_mm_hole_node_start(entry),
drm_mm_hole_node_end(entry),
size, alignment))
continue;
 
if (alignment) {
register unsigned tmp = entry->start % alignment;
if (tmp)
wasted += alignment - tmp;
}
 
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
 
if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
}
}
 
return best;
}
373,53 → 393,203
unsigned long end,
int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
unsigned wasted;
 
BUG_ON(mm->scanned_blocks);
 
best = NULL;
best_size = ~0UL;
 
list_for_each(list, free_stack) {
entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
start : drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
end : drm_mm_hole_node_end(entry);
 
if (entry->size < size)
BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
 
if (entry->start > end || (entry->start+entry->size) < start)
continue;
 
if (entry->start < start)
wasted += start - entry->start;
 
if (alignment) {
register unsigned tmp = (entry->start + wasted) % alignment;
if (tmp)
wasted += alignment - tmp;
}
 
if (entry->size >= size + wasted &&
(entry->start + wasted + size) <= end) {
if (!best_match)
return entry;
 
if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
}
}
 
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
 
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
list_replace(&old->node_list, &new->node_list);
list_replace(&old->hole_stack, &new->hole_stack);
new->hole_follows = old->hole_follows;
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
 
old->allocated = 0;
new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
 
/**
* Initialize lru scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
* hole.
*
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
unsigned alignment)
{
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_check_range = 0;
mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
 
/**
* Initialize lru scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
* hole. This version is for range-restricted scans.
*
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end)
{
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 
/**
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
* Returns non-zero, if a hole has been found, zero otherwise.
*/
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
unsigned long hole_start, hole_end;
unsigned long adj_start;
unsigned long adj_end;
 
mm->scanned_blocks++;
 
BUG_ON(node->scanned_block);
node->scanned_block = 1;
 
prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
node_list);
 
node->scanned_preceeds_hole = prev_node->hole_follows;
prev_node->hole_follows = 1;
list_del(&node->node_list);
node->node_list.prev = &prev_node->node_list;
node->node_list.next = &mm->prev_scanned_node->node_list;
mm->prev_scanned_node = node;
 
hole_start = drm_mm_hole_node_start(prev_node);
hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
adj_start = hole_start < mm->scan_start ?
mm->scan_start : hole_start;
adj_end = hole_end > mm->scan_end ?
mm->scan_end : hole_end;
} else {
adj_start = hole_start;
adj_end = hole_end;
}
 
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
mm->scan_hit_size = hole_end;
 
return 1;
}
 
return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
 
/**
* Remove a node from the scan list.
*
* Nodes _must_ be removed in the exact same order from the scan list as they
* have been added, otherwise the internal state of the memory manager will be
* corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
* immediately following drm_mm_search_free with best_match = 0 will then return
* the just freed block (because it's at the top of the free_stack list).
*
* Returns one if this block should be evicted, zero otherwise. Will always
* return zero when no hole has been found.
*/
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
 
mm->scanned_blocks--;
 
BUG_ON(!node->scanned_block);
node->scanned_block = 0;
 
prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
node_list);
 
prev_node->hole_follows = node->scanned_preceeds_hole;
INIT_LIST_HEAD(&node->node_list);
list_add(&node->node_list, &prev_node->node_list);
 
/* Only need to check for containment because start & size for the
* complete resulting free block (not just the desired part) is
* stored. */
if (node->start >= mm->scan_hit_start &&
node->start + node->size
<= mm->scan_hit_start + mm->scan_hit_size) {
return 1;
}
 
return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
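/*
 * A sketch of the intended scan usage, modelled on how GEM drivers drive
 * this API; the "lru"/"scan_link" members and my_obj_evict() are
 * assumptions, not part of this file.
 */
static int evict_something(struct drm_mm *mm, struct list_head *lru,
			   unsigned long size, unsigned alignment)
{
	struct my_obj *obj, *next;
	struct list_head unwind;
	int found = 0;

	INIT_LIST_HEAD(&unwind);
	drm_mm_init_scan(mm, size, alignment);

	/* Feed LRU objects to the scanner until it reports a hole. */
	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &unwind);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = 1;
			break;
		}
	}

	/* Blocks must leave the scan in the exact reverse order of
	 * addition; since "unwind" was built by prepending, a forward walk
	 * is already reversed.  Blocks flagged by drm_mm_scan_remove_block()
	 * overlap the hole that was found and must really be evicted. */
	list_for_each_entry_safe(obj, next, &unwind, scan_link) {
		int evict = drm_mm_scan_remove_block(&obj->node);
		list_del(&obj->scan_link);
		if (found && evict)
			my_obj_evict(obj);	/* ends in drm_mm_remove_node() */
	}

	return found ? 0 : -ENOSPC;
}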
 
int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
struct list_head *head = &mm->head_node.node_list;
 
return (head->next->next == head);
}
427,37 → 597,40
 
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
INIT_LIST_HEAD(&mm->hole_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
 
return drm_mm_create_tail_node(mm, start, size, 0);
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
INIT_LIST_HEAD(&mm->head_node.hole_stack);
mm->head_node.hole_follows = 1;
mm->head_node.scanned_block = 0;
mm->head_node.scanned_prev_free = 0;
mm->head_node.scanned_next_free = 0;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
return 0;
}
EXPORT_SYMBOL(drm_mm_init);
 
void drm_mm_takedown(struct drm_mm * mm)
{
struct list_head *bnode = mm->fl_entry.next;
struct drm_mm_node *entry;
struct drm_mm_node *next;
struct drm_mm_node *entry, *next;
 
entry = list_entry(bnode, struct drm_mm_node, fl_entry);
 
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
if (!list_empty(&mm->head_node.node_list)) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
 
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
kfree(entry);
 
spin_lock(&mm->unused_lock);
list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
list_del(&entry->fl_entry);
list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
list_del(&entry->node_list);
kfree(entry);
--mm->num_unused;
}
/drivers/video/drm/drm_modes.c
76,7 → 76,7
* according to the hdisplay, vdisplay, vrefresh.
* It is based on the VESA(TM) Coordinated Video Timing Generator by
* Graham Loveridge April 9, 2003 available at
* http://www.vesa.org/public/CVT/CVTd6r1.xls
* http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
*
* And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
* What I have done is to translate it by using integer calculation.
251,7 → 251,10
drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
/* Fill in HSync values */
drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
/* Fill in VSync values */
drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
258,8 → 261,10
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
if (interlaced)
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
/* Fill the mode line name */
drm_mode_set_name(drm_mode);
if (reduced)
268,8 → 273,6
else
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NHSYNC);
if (interlaced)
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
 
return drm_mode;
}
276,7 → 279,7
EXPORT_SYMBOL(drm_cvt_mode);
 
/**
* drm_gtf_mode - create the modeline based on GTF algorithm
* drm_gtf_mode_complex - create the modeline based on full GTF algorithm
*
* @dev :drm device
* @hdisplay :hdisplay size
283,28 → 286,22
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
* @margins :whether the margin is supported
* @margins :desired margin size
* @GTF_[MCKJ] :extended GTF formula parameters
*
* LOCKING:
* none.
*
* return the modeline based on GTF algorithm
* return the modeline based on full GTF algorithm.
*
* This function creates the modeline based on the GTF algorithm.
* Generalized Timing Formula is derived from:
* GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
* And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
* What I have done is to translate it by using integer calculation.
* I also refer to the function of fb_get_mode in the file of
* drivers/video/fbmon.c
* GTF feature blocks specify C and J in multiples of 0.5, so we pass them
* in here multiplied by two. For a C of 40, pass in 80.
*/
struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
int vdisplay, int vrefresh,
bool interlaced, int margins)
{
/* 1) top/bottom margin size (% of height) - default: 1.8, */
struct drm_display_mode *
drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
int vrefresh, bool interlaced, int margins,
int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
#define GTF_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define GTF_CELL_GRAN 8
316,16 → 313,8
#define H_SYNC_PERCENT 8
/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP 550
/* blanking formula gradient */
#define GTF_M 600
/* blanking formula offset */
#define GTF_C 40
/* blanking formula scaling factor */
#define GTF_K 128
/* blanking formula scaling factor */
#define GTF_J 20
/* C' and M' are part of the Blanking Duty Cycle computation */
#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
#define GTF_M_PRIME (GTF_K * GTF_M / 256)
struct drm_display_mode *drm_mode;
unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
460,17 → 449,61
 
drm_mode->clock = pixel_freq;
 
drm_mode_set_name(drm_mode);
drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
 
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
 
drm_mode_set_name(drm_mode);
if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
else
drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
 
return drm_mode;
}
EXPORT_SYMBOL(drm_gtf_mode_complex);
 
/**
* drm_gtf_mode - create the modeline based on GTF algorithm
*
* @dev :drm device
* @hdisplay :hdisplay size
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
* @margins :whether the margin is supported
*
* LOCKING:
* none.
*
* return the modeline based on GTF algorithm
*
* This function creates the modeline based on the GTF algorithm.
* Generalized Timing Formula is derived from:
* GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
* And it is copied from the file xserver/hw/xfree86/modes/xf86gtf.c,
* translated into integer calculation; the fb_get_mode() function in
* drivers/video/fbmon.c served as an additional reference.
*
* Standard GTF parameters:
* M = 600
* C = 40
* K = 128
* J = 20
*/
struct drm_display_mode *
drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
bool lace, int margins)
{
return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
margins, 600, 40 * 2, 128, 20 * 2);
}
EXPORT_SYMBOL(drm_gtf_mode);
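/*
 * A usage sketch: the two calls below build the same 1024x768@60
 * modeline, since drm_gtf_mode() just plugs the standard parameters into
 * drm_gtf_mode_complex() with C and J pre-doubled ("dev" is assumed to
 * be a valid device here).
 */
static struct drm_display_mode *example_gtf(struct drm_device *dev)
{
	struct drm_display_mode *a, *b;

	a = drm_gtf_mode(dev, 1024, 768, 60, false, 0);
	b = drm_gtf_mode_complex(dev, 1024, 768, 60, false, 0,
				 600, 80, 128, 40);	/* M, 2C, K, 2J */
	drm_mode_destroy(dev, b);	/* identical to a */
	return a;
}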
 
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
482,8 → 515,11
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
mode->vdisplay);
bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
 
snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
mode->hdisplay, mode->vdisplay,
interlaced ? "i" : "");
}
EXPORT_SYMBOL(drm_mode_set_name);
 
557,7 → 593,7
*
* Return @mode's hsync rate in kHz, rounded to the nearest integer.
*/
int drm_mode_hsync(struct drm_display_mode *mode)
int drm_mode_hsync(const struct drm_display_mode *mode)
{
unsigned int calc_val;
 
591,7 → 627,7
* If it is 70.288, it will return 70Hz.
* If it is 59.6, it will return 60Hz.
*/
int drm_mode_vrefresh(struct drm_display_mode *mode)
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
689,7 → 725,7
* a pointer to it. Used to create new instances of established modes.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
/drivers/video/drm/i2c/i2c-algo-bit.c
43,7 → 43,6
do {} while (0)
#endif /* DEBUG */
 
 
/* ----- global variables --------------------------------------------- */
 
static int bit_test; /* see if the line-setting functions work */
165,8 → 164,8
setsda(adap, sb);
udelay((adap->udelay + 1) / 2);
if (sclhi(adap) < 0) { /* timed out */
// bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
// "timeout at bit #%d\n", (int)c, i);
bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
"timeout at bit #%d\n", (int)c, i);
return -ETIMEDOUT;
}
/* FIXME do arbitration here:
179,8 → 178,8
}
sdahi(adap);
if (sclhi(adap) < 0) { /* timeout */
// bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
// "timeout at ack\n", (int)c);
bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
"timeout at ack\n", (int)c);
return -ETIMEDOUT;
}
 
188,8 → 187,8
* NAK (usually to report problems with the data we wrote).
*/
ack = !getsda(adap); /* ack: sda is pulled low -> success */
// bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c,
// ack ? "A" : "NA");
bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c,
ack ? "A" : "NA");
 
scllo(adap);
return ack;
227,10 → 226,18
* Sanity check for the adapter hardware - check the reaction of
* the bus lines only if it seems to be idle.
*/
static int test_bus(struct i2c_algo_bit_data *adap, char *name)
static int test_bus(struct i2c_adapter *i2c_adap)
{
int scl, sda;
struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
const char *name = i2c_adap->name;
int scl, sda, ret;
 
if (adap->pre_xfer) {
ret = adap->pre_xfer(i2c_adap);
if (ret < 0)
return -ENODEV;
}
 
if (adap->getscl == NULL)
pr_info("%s: Testing SDA only, SCL is not readable\n", name);
 
292,11 → 299,19
"while pulling SCL high!\n", name);
goto bailout;
}
 
if (adap->post_xfer)
adap->post_xfer(i2c_adap);
 
pr_info("%s: Test OK\n", name);
return 0;
bailout:
sdahi(adap);
sclhi(adap);
 
if (adap->post_xfer)
adap->post_xfer(i2c_adap);
 
return -ENODEV;
}
 
429,11 → 444,11
msg->len += inval;
}
 
// bit_dbg(2, &i2c_adap->dev, "readbytes: 0x%02x %s\n",
// inval,
// (flags & I2C_M_NO_RD_ACK)
// ? "(no ack/nak)"
// : (count ? "A" : "NA"));
bit_dbg(2, &i2c_adap->dev, "readbytes: 0x%02x %s\n",
inval,
(flags & I2C_M_NO_RD_ACK)
? "(no ack/nak)"
: (count ? "A" : "NA"));
 
if (!(flags & I2C_M_NO_RD_ACK)) {
inval = acknak(i2c_adap, count);
516,6 → 531,13
int i, ret;
unsigned short nak_ok;
 
//ENTER();
if (adap->pre_xfer) {
ret = adap->pre_xfer(i2c_adap);
if (ret < 0)
return ret;
}
 
bit_dbg(3, &i2c_adap->dev, "emitting start condition\n");
i2c_start(adap);
for (i = 0; i < num; i++) {
564,6 → 586,10
bailout:
bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n");
i2c_stop(adap);
// LEAVE();
 
if (adap->post_xfer)
adap->post_xfer(i2c_adap);
return ret;
}
 
586,15 → 612,17
/*
* registering functions to load algorithms at runtime
*/
static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
static int __i2c_bit_add_bus(struct i2c_adapter *adap,
int (*add_adapter)(struct i2c_adapter *))
{
struct i2c_algo_bit_data *bit_adap = adap->algo_data;
int ret;
 
// if (bit_test) {
// int ret = test_bus(bit_adap, adap->name);
// if (ret < 0)
// return -ENODEV;
// }
if (bit_test) {
ret = test_bus(adap);
if (ret < 0)
return -ENODEV;
}
 
/* register new adapter to i2c module... */
adap->algo = &i2c_bit_algo;
605,12 → 633,7
 
int i2c_bit_add_bus(struct i2c_adapter *adap)
{
int err;
return __i2c_bit_add_bus(adap, NULL);
}
 
err = i2c_bit_prepare_bus(adap);
if (err)
return err;
 
return 0; //i2c_add_adapter(adap);
}
 
/drivers/video/drm/i2c/i2c-core.c
64,27 → 64,12
*/
 
if (adap->algo->master_xfer) {
#ifdef DEBUG
for (ret = 0; ret < num; ret++) {
dev_dbg(&adap->dev, "master_xfer[%d] %c, addr=0x%02x, "
"len=%d%s\n", ret, (msgs[ret].flags & I2C_M_RD)
? 'R' : 'W', msgs[ret].addr, msgs[ret].len,
(msgs[ret].flags & I2C_M_RECV_LEN) ? "+" : "");
}
#endif
 
// if (in_atomic() || irqs_disabled()) {
// ret = mutex_trylock(&adap->bus_lock);
// if (!ret)
// /* I2C activity is ongoing. */
// return -EAGAIN;
// } else {
// mutex_lock_nested(&adap->bus_lock, adap->level);
// }
 
/* Retry automatically on arbitration loss */
// orig_jiffies = jiffies;
orig_jiffies = 0;
for (ret = 0, try = 0; try <= adap->retries; try++) {
 
ret = adap->algo->master_xfer(adap, msgs, num);
if (ret != -EAGAIN)
break;
93,7 → 78,6
delay(1);
}
// mutex_unlock(&adap->bus_lock);
 
return ret;
} else {
// dev_dbg(&adap->dev, "I2C level transfers not supported\n");
103,6 → 87,42
EXPORT_SYMBOL(i2c_transfer);
 
 
/**
* i2c_new_device - instantiate an i2c device
* @adap: the adapter managing the device
* @info: describes one I2C device; bus_num is ignored
* Context: can sleep
*
* Create an i2c device. Binding is handled through driver model
* probe()/remove() methods. A driver may be bound to this device when we
* return from this function, or any later moment (e.g. maybe hotplugging will
* load the driver module). This call is not appropriate for use by mainboard
* initialization logic, which usually runs during an arch_initcall() long
* before any i2c_adapter could exist.
*
* This returns the new i2c client, which may be saved for later use with
* i2c_unregister_device(); or NULL to indicate an error.
*/
struct i2c_client *
i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
{
struct i2c_client *client;
int status;
 
client = kzalloc(sizeof *client, GFP_KERNEL);
if (!client)
return NULL;
 
client->adapter = adap;
 
client->flags = info->flags;
client->addr = info->addr;
client->irq = info->irq;
 
strlcpy(client->name, info->type, sizeof(client->name));
 
return client;
}
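/*
 * A minimal usage sketch (the DDC address and the adapter pointer are
 * assumptions); note this port only copies flags/addr/irq/name and does
 * no driver binding.
 */
static struct i2c_client *attach_ddc(struct i2c_adapter *adap)
{
	struct i2c_board_info info = {
		.type = "ddc",	/* copied into client->name */
		.addr = 0x50,	/* typical EDID EEPROM address */
	};

	return i2c_new_device(adap, &info);
}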
 
 
 
/drivers/video/drm/radeon/Makefile
15,7 → 15,7
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux
 
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i486 -fomit-frame-pointer -fno-builtin-printf
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf
 
LIBPATH:= $(DRV_TOPDIR)/ddk
 
43,6 → 43,7
NAME_SRC= \
pci.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_crtc.c \
51,7 → 52,6
$(DRM_TOPDIR)/drm_dp_i2c_helper.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
evergreen.c \
radeon_device.c \
radeon_clocks.c \
radeon_i2c.c \
58,6 → 58,7
atom.c \
radeon_atombios.c \
radeon_agp.c \
radeon_asic.c \
atombios_crtc.c \
atombios_dp.c \
radeon_encoders.c \
71,7 → 72,7
radeon_gart.c \
radeon_ring.c \
radeon_object_kos.c \
radeon_gem.c \
radeon_pm.c \
r100.c \
r200.c \
r300.c \
88,7 → 89,6
radeon_fb.c \
rdisplay.c \
rdisplay_kms.c \
radeon_pm.c \
cmdline.c \
fwblob.asm
# cursor.S
/drivers/video/drm/radeon/ObjectID.h
37,6 → 37,8
#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
#define GRAPH_OBJECT_TYPE_ROUTER 0x4
/* deleted */
#define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6
#define GRAPH_OBJECT_TYPE_GENERIC 0x7
 
/****************************************************/
/* Encoder Object ID Definition */
64,6 → 66,9
#define ENCODER_OBJECT_ID_VT1623 0x10
#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
#define ENCODER_OBJECT_ID_ALMOND 0x22
#define ENCODER_OBJECT_ID_TRAVIS 0x23
#define ENCODER_OBJECT_ID_NUTMEG 0x22
/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
108,6 → 113,7
#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
 
/* deleted */
 
124,6 → 130,7
#define GENERIC_OBJECT_ID_GLSYNC 0x01
#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
#define GENERIC_OBJECT_ID_MXM_OPM 0x03
#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
 
/****************************************************/
/* Graphics Object ENUM ID Definition */
360,6 → 367,26
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
 
#define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
 
#define ENCODER_ALMOND_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
 
#define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
 
#define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
 
#define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
 
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
421,6 → 448,14
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
 
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
 
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
 
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
512,6 → 547,7
#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
 
#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
593,6 → 629,14
GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
 
#define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
 
#define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
 
/****************************************************/
/* Router Object ID definition - Shared with BIOS */
/****************************************************/
621,6 → 665,10
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
 
#define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
 
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
/drivers/video/drm/radeon/atom.c
24,6 → 24,7
 
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
 
#define ATOM_DEBUG
31,6 → 32,7
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
#include "radeon.h"
 
#define ATOM_COND_ABOVE 0
#define ATOM_COND_ABOVEOREQUAL 1
52,15 → 54,17
 
typedef struct {
struct atom_context *ctx;
 
uint32_t *ps, *ws;
int ps_shift;
uint16_t start;
unsigned last_jump;
unsigned long last_jump_jiffies;
bool abort;
} atom_exec_context;
 
int atom_debug = 0;
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
 
static uint32_t atom_arg_mask[8] =
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
98,7 → 102,9
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
uint32_t index, uint32_t data)
{
struct radeon_device *rdev = ctx->card->dev->dev_private;
uint32_t temp = 0xCDCDCDCD;
 
while (1)
switch (CU8(base)) {
case ATOM_IIO_NOP:
105,12 → 111,13
base++;
break;
case ATOM_IIO_READ:
temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
base += 3;
break;
case ATOM_IIO_WRITE:
(void)ctx->card->reg_read(ctx->card, CU16(base + 1));
ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
if (rdev->family == CHIP_RV515)
(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
case ATOM_IIO_CLEAR:
128,7 → 135,7
case ATOM_IIO_MOVE_INDEX:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
CU8(base + 2));
CU8(base + 3));
temp |=
((index >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
138,7 → 145,7
case ATOM_IIO_MOVE_DATA:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
CU8(base + 2));
CU8(base + 3));
temp |=
((data >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
148,7 → 155,7
case ATOM_IIO_MOVE_ATTR:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
CU8(base + 2));
CU8(base + 3));
temp |=
((ctx->
io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
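The three MOVE fixes in this hunk share one shape: CU8(base + 1) holds the field width, CU8(base + 2) the source bit position, and CU8(base + 3) the destination bit position, so the clear mask must be built at base + 3, not base + 2. A standalone sketch of the corrected field move, under those assumptions:

#include <stdint.h>

/* Take a width-bit field from src at srcpos and write it into temp at
 * dstpos. The pre-fix bug cleared the field at the source position. */
static uint32_t iio_move(uint32_t temp, uint32_t src,
			 unsigned width, unsigned srcpos, unsigned dstpos)
{
	uint32_t field = 0xFFFFFFFFu >> (32 - width);

	temp &= ~(field << dstpos); /* clear at the destination */
	temp |= ((src >> srcpos) & field) << dstpos;
	return temp;
}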
604,13 → 611,18
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
int idx = U8((*ptr)++);
int r = 0;
 
if (idx < ATOM_TABLE_NAMES_CNT)
SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
if (r) {
ctx->abort = true;
}
}
 
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
640,12 → 652,12
 
static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t count = U8((*ptr)++);
unsigned count = U8((*ptr)++);
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
udelay(count);
else
mdelay(count);
msleep(count);
}
 
static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
673,6 → 685,8
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
int execute = 0, target = U16(*ptr);
unsigned long cjiffies;
 
(*ptr) += 2;
switch (arg) {
case ATOM_COND_ABOVE:
707,16 → 721,16
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src1, src2, saved;
uint32_t dst, mask, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src1: ");
src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
SDEBUG(" src2: ");
src2 = atom_get_src(ctx, attr, ptr);
dst &= src1;
dst |= src2;
mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
SDEBUG(" mask: 0x%08x", mask);
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst &= mask;
dst |= src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
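The renaming above makes the semantics of the mask op explicit: keep only the destination bits selected by the immediate mask, then OR in the source. A standalone sketch with illustrative values:

#include <stdint.h>
#include <assert.h>

static uint32_t atom_mask_value(uint32_t dst, uint32_t mask, uint32_t src)
{
	return (dst & mask) | src;
}

int main(void)
{
	/* keep the high word of dst, splice in a new low word */
	assert(atom_mask_value(0xDEADBEEF, 0xFFFF0000, 0x0000CAFE) == 0xDEADCAFE);
	return 0;
}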
881,11 → 895,16
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
/* op needs the full dst value */
dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
dst &= atom_arg_mask[dst_align];
dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
895,11 → 914,16
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
/* op needs the full dst value */
dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
dst &= atom_arg_mask[dst_align];
dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
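Both shift fixes follow the same pattern: shift the full saved dword, then clip the result back to the destination's alignment with the same mask/shift tables used elsewhere in the interpreter. A standalone sketch, assuming a low-word destination (alignment mask 0xFFFF, alignment shift 0):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t saved = 0x1234ABCD; /* full register value; dst is its low word */
	uint32_t mask = 0xFFFF;      /* stands in for atom_arg_mask[dst_align] */
	unsigned align = 0;          /* stands in for atom_arg_shift[dst_align] */
	uint32_t dst;

	dst = saved << 4;            /* shift the whole saved value... */
	dst &= mask;                 /* ...then keep only the destination field */
	dst >>= align;
	printf("0x%04X\n", dst);     /* prints 0xBCD0 */
	return 0;
}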
1104,15 → 1128,16
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
 
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
unsigned char op;
atom_exec_context ectx;
int ret = 0;
 
if (!base)
return;
return -EINVAL;
 
len = CU16(base + ATOM_CT_SIZE_PTR);
ws = CU8(base + ATOM_CT_WS_PTR);
1125,6 → 1150,8
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
ectx.abort = false;
ectx.last_jump = 0;
if (ws)
ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
else
1137,6 → 1164,12
SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
else
SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
if (ectx.abort) {
DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
base, len, ws, ps, ptr - 1);
ret = -EINVAL;
goto free;
}
 
if (op < ATOM_OP_CNT && op > 0)
opcode_table[op].func(&ectx, &ptr,
1150,12 → 1183,16
debug_depth--;
SDEBUG("<<\n");
 
free:
if (ws)
kfree(ectx.ws);
return ret;
}
 
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
int r;
 
mutex_lock(&ctx->mutex);
/* reset reg block */
ctx->reg_block = 0;
1163,8 → 1200,9
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
atom_execute_table_locked(ctx, index, params);
r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
return r;
}
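With the execute path now returning an error code, callers can detect a stuck or missing table instead of continuing silently. A minimal caller sketch; example_run_table and its arguments are placeholders, not names from this diff:

static void example_run_table(struct atom_context *ctx, int index,
			      uint32_t *params)
{
	int r = atom_execute_table(ctx, index, params);

	if (r)
		DRM_ERROR("AtomBIOS table %d failed: %d\n", index, r);
}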
 
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1248,9 → 1286,7
 
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
atom_execute_table(ctx, ATOM_CMD_INIT, ps);
 
return 0;
return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
}
 
void atom_destroy(struct atom_context *ctx)
1260,13 → 1296,17
kfree(ctx);
}
 
void atom_parse_data_header(struct atom_context *ctx, int index,
bool atom_parse_data_header(struct atom_context *ctx, int index,
uint16_t * size, uint8_t * frev, uint8_t * crev,
uint16_t * data_start)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->data_table + offset);
u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
 
if (!mdt[index])
return false;
 
if (size)
*size = CU16(idx);
if (frev)
1274,20 → 1314,24
if (crev)
*crev = CU8(idx + 3);
*data_start = idx;
return;
return true;
}
 
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
uint8_t * crev)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->cmd_table + offset);
u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
 
if (!mct[index])
return false;
 
if (frev)
*frev = CU8(idx + 2);
if (crev)
*crev = CU8(idx + 3);
return;
return true;
}
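A minimal caller sketch for the new bool contract (example_table_present is a hypothetical helper): a table with an empty master-table entry is now skipped cleanly instead of yielding bogus revision bytes.

static bool example_table_present(struct atom_context *ctx, int index)
{
	uint8_t frev, crev;

	/* returns false when the master command table entry is NULL */
	return atom_parse_cmd_header(ctx, index, &frev, &crev);
}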
 
int atom_allocate_fb_scratch(struct atom_context *ctx)
1294,11 → 1338,10
{
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
uint16_t data_offset;
int usage_bytes;
int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
 
atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
 
if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
 
DRM_DEBUG("atom firmware requested %08x %dkb\n",
1306,6 → 1349,7
firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
 
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
}
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
/* allocate some scratch memory */
/drivers/video/drm/radeon/atom.h
113,6 → 113,8
struct drm_device *dev;
void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
140,11 → 142,13
extern int atom_debug;
 
struct atom_context *atom_parse(struct card_info *, void *);
void atom_execute_table(struct atom_context *, int, uint32_t *);
int atom_execute_table(struct atom_context *, int, uint32_t *);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
uint8_t *frev, uint8_t *crev, uint16_t *data_start);
bool atom_parse_cmd_header(struct atom_context *ctx, int index,
uint8_t *frev, uint8_t *crev);
int atom_allocate_fb_scratch(struct atom_context *ctx);
#include "atom-types.h"
#include "atombios.h"
/drivers/video/drm/radeon/atombios.h
73,8 → 73,18
#define ATOM_PPLL1 0
#define ATOM_PPLL2 1
#define ATOM_DCPLL 2
#define ATOM_PPLL0 2
#define ATOM_EXT_PLL1 8
#define ATOM_EXT_PLL2 9
#define ATOM_EXT_CLOCK 10
#define ATOM_PPLL_INVALID 0xFF
 
#define ENCODER_REFCLK_SRC_P1PLL 0
#define ENCODER_REFCLK_SRC_P2PLL 1
#define ENCODER_REFCLK_SRC_DCPLL 2
#define ENCODER_REFCLK_SRC_EXTCLK 3
#define ENCODER_REFCLK_SRC_INVALID 0xFF
 
#define ATOM_SCALER1 0
#define ATOM_SCALER2 1
 
192,6 → 202,9
/*Image can't be updated, while Driver needs to carry the new table! */
}ATOM_COMMON_TABLE_HEADER;
 
/****************************************************************************/
// Structure stores the ROM header.
/****************************************************************************/
typedef struct _ATOM_ROM_HEADER
{
ATOM_COMMON_TABLE_HEADER sHeader;
221,6 → 234,9
#define USHORT void*
#endif
 
/****************************************************************************/
// Structures used in Command.mtb
/****************************************************************************/
typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
312,6 → 328,7
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
#define HPDInterruptService ReadHWAssistedI2CStatus
#define EnableVGA_Access GetSCLKOverMCLKRatio
#define GetDispObjectInfo EnableYUV
 
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
357,7 → 374,25
/****************************************************************************/
#define COMPUTE_MEMORY_PLL_PARAM 1
#define COMPUTE_ENGINE_PLL_PARAM 2
#define ADJUST_MC_SETTING_PARAM 3
 
/****************************************************************************/
// Structures used by AdjustMemoryControllerTable
/****************************************************************************/
typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
{
#if ATOM_BIG_ENDIAN
ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
ULONG ulClockFreq:24;
#else
ULONG ulClockFreq:24;
ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
#endif
}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
#define POINTER_RETURN_FLAG 0x80
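Since the struct above depends on compiler bitfield layout, a caller can also pack the dword by hand: clock in bits 23:0, memory module number in bits 30:24, pointer-return flag in bit 31 (the top bit of BYTE_3, matching POINTER_RETURN_FLAG). A hedged sketch; adjust_mc_arg is illustrative, not part of the header:

#include <stdint.h>

static uint32_t adjust_mc_arg(uint32_t clk_10khz, uint8_t module, int ret_ptr)
{
	return (clk_10khz & 0xFFFFFFu) |
	       ((uint32_t)(module & 0x7Fu) << 24) |
	       (ret_ptr ? 1u << 31 : 0u);
}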
 
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
{
ULONG ulClock; //When returned, it's the re-calculated clock based on given Fb_div Post_Div and ref_div
440,6 → 475,26
#endif
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
 
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
{
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
UCHAR ucPostDiv; //Output Parameter
union
{
UCHAR ucCntlFlag; //Output Flags
UCHAR ucInputFlag; //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
};
UCHAR ucReserved;
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
 
// ucInputFlag
#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
 
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
583,6 → 638,7
#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02
#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_ENCODER_CONFIG_LINKA 0x00
#define ATOM_ENCODER_CONFIG_LINKB 0x04
608,6 → 664,9
#define ATOM_ENCODER_MODE_TV 13
#define ATOM_ENCODER_MODE_CV 14
#define ATOM_ENCODER_MODE_CRT 15
#define ATOM_ENCODER_MODE_DVO 16
#define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2
#define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2
 
typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
{
661,40 → 720,53
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
#define ATOM_ENCODER_CMD_SETUP 0x0f
#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE 0x10
 
// ucStatus
#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
 
//ucTableFormatRevision=1
//ucTableContentRevision=3
// The following ENABLE sub-function will be used by the driver when TMDS/HDMI/LVDS is used; the disable sub-function will likewise be used by the driver
typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
{
#if ATOM_BIG_ENDIAN
UCHAR ucReserved1:1;
UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
UCHAR ucReserved:3;
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
#else
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
UCHAR ucReserved:3;
UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
UCHAR ucReserved1:1;
#endif
}ATOM_DIG_ENCODER_CONFIG_V3;
 
#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00
#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10
#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20
#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30
#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40
#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50
 
 
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
{
USHORT usPixelClock; // in 10KHz; for bios convenient
ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
UCHAR ucAction;
union {
UCHAR ucEncoderMode;
// =0: DP encoder
// =1: LVDS encoder
702,12 → 774,73
// =3: HDMI encoder
// =4: SDVO encoder
// =5: DP audio
UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
// =0: external DP
// =1: internal DP2
// =0x11: internal DP1 for NutMeg/Travis DP translator
};
UCHAR ucLaneNum; // how many lanes to enable
UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
UCHAR ucReserved;
}DIG_ENCODER_CONTROL_PARAMETERS_V3;
 
//ucTableFormatRevision=1
//ucTableContentRevision=4
// start from NI
// The following ENABLE sub-function will be used by the driver when TMDS/HDMI/LVDS is used; the disable sub-function will likewise be used by the driver
typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
{
#if ATOM_BIG_ENDIAN
UCHAR ucReserved1:1;
UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
UCHAR ucReserved:2;
UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version
#else
UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version
UCHAR ucReserved:2;
UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
UCHAR ucReserved1:1;
#endif
}ATOM_DIG_ENCODER_CONFIG_V4;
 
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20
#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
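Because the V4 parameter block below exposes the config both as bitfields and as a raw ucConfig byte, these defines compose directly. A tiny standalone sketch (the two defines are copied from the list above):

#include <stdint.h>
#include <stdio.h>

#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20

int main(void)
{
	uint8_t cfg = ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER |
		      ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;

	printf("ucConfig = 0x%02X\n", cfg); /* DIG2 at 2.70 GHz -> 0x21 */
	return 0;
}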
 
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
{
USHORT usPixelClock; // in 10KHz; for bios convenient
union{
ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
UCHAR ucConfig;
};
UCHAR ucAction;
union {
UCHAR ucEncoderMode;
// =0: DP encoder
// =1: LVDS encoder
// =2: DVI encoder
// =3: HDMI encoder
// =4: SDVO encoder
// =5: DP audio
UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
// =0: external DP
// =1: internal DP2
// =0x11: internal DP1 for NutMeg/Travis DP translator
};
UCHAR ucLaneNum; // how many lanes to enable
UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HPD programming. New comparing to previous version
}DIG_ENCODER_CONTROL_PARAMETERS_V4;
 
// define ucBitPerColor:
#define PANEL_BPC_UNDEFINE 0x00
#define PANEL_6BIT_PER_COLOR 0x01
716,6 → 849,11
#define PANEL_12BIT_PER_COLOR 0x04
#define PANEL_16BIT_PER_COLOR 0x05
 
//define ucPanelMode
#define DP_PANEL_MODE_EXTERNAL_DP_MODE 0x00
#define DP_PANEL_MODE_INTERNAL_DP2_MODE 0x01
#define DP_PANEL_MODE_INTERNAL_DP1_MODE 0x11
 
/****************************************************************************/
// Structures used by UNIPHYTransmitterControlTable
// LVTMATransmitterControlTable
893,6 → 1031,7
#endif
}ATOM_DIG_TRANSMITTER_CONFIG_V3;
 
 
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
{
union
936,7 → 1075,150
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
 
 
/****************************************************************************/
// Structures used by UNIPHYTransmitterControlTable V1.4
// ASIC Families: NI
// ucTableFormatRevision=1
// ucTableContentRevision=4
/****************************************************************************/
typedef struct _ATOM_DP_VS_MODE_V4
{
UCHAR ucLaneSel;
union
{
UCHAR ucLaneSet;
struct {
#if ATOM_BIG_ENDIAN
UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
#else
UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
#endif
};
};
}ATOM_DP_VS_MODE_V4;
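Code that cannot rely on bitfield ordering can unpack the three training fields from the raw ucLaneSet byte directly (voltage swing in bits 2:0, pre-emphasis in bits 5:3, post-cursor2 in bits 7:6). A standalone sketch; the struct name is illustrative:

#include <stdint.h>

struct dp_vs_fields {
	uint8_t swing, pre_emphasis, post_cursor2;
};

static struct dp_vs_fields decode_lane_set(uint8_t lane_set)
{
	struct dp_vs_fields f = {
		.swing = lane_set & 0x7,
		.pre_emphasis = (lane_set >> 3) & 0x7,
		.post_cursor2 = (lane_set >> 6) & 0x3,
	};
	return f;
}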
typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
{
#if ATOM_BIG_ENDIAN
UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
// =1 Dig Transmitter 2 ( Uniphy CD )
// =2 Dig Transmitter 3 ( Uniphy EF )
UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
// =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
#else
UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
// =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
// =1 Dig Transmitter 2 ( Uniphy CD )
// =2 Dig Transmitter 3 ( Uniphy EF )
#endif
}ATOM_DIG_TRANSMITTER_CONFIG_V4;
 
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
{
union
{
USHORT usPixelClock; // in 10KHz; for bios convenient
USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode Redefined comparing to previous version
};
union
{
ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
UCHAR ucConfig;
};
UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
UCHAR ucLaneNum;
UCHAR ucReserved[3];
}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
 
//ucConfig
//Bit0
#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01
//Bit1
#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02
//Bit2
#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04
#define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00
#define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04
// Bit3
#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08
#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00
#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08
// Bit5:4
#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30
#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00
#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10
#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4
#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed comparing to V3
// Bit7:6
#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0
#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB
#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD
#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
 
 
/****************************************************************************/
// Structures used by ExternalEncoderControlTable V1.3
// ASIC Families: Evergreen, Llano, NI
// ucTableFormatRevision=1
// ucTableContentRevision=3
/****************************************************************************/
 
typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
{
union{
USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT
USHORT usConnectorId; // connector id, valid when ucAction = INIT
};
UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT
UCHAR ucAction; //
UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT
UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
UCHAR ucReserved;
}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
 
// ucAction
#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00
#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
 
// ucConfig
#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02
#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70
#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00
#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10
#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20
 
typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
{
EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
ULONG ulReserved[2];
}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
 
 
/****************************************************************************/
// Structures used by DAC1OuputControlTable
// DAC2OuputControlTable
// LVTMAOutputControlTable (Before DEC30)
1142,6 → 1424,7
#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
 
 
typedef struct _PIXEL_CLOCK_PARAMETERS_V3
{
USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
1202,6 → 1485,55
#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
 
typedef struct _CRTC_PIXEL_CLOCK_FREQ
{
#if ATOM_BIG_ENDIAN
ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
// drive the pixel clock. not used for DCPLL case.
ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
// 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
#else
ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
// 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
// drive the pixel clock. not used for DCPLL case.
#endif
}CRTC_PIXEL_CLOCK_FREQ;
 
typedef struct _PIXEL_CLOCK_PARAMETERS_V6
{
union{
CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency
ULONG ulDispEngClkFreq; // dispclk frequency
};
USHORT usFbDiv; // feedback divider integer part.
UCHAR ucPostDiv; // post divider.
UCHAR ucRefDiv; // Reference divider
UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
// indicate which graphic encoder will be used.
UCHAR ucEncoderMode; // Encoder mode:
UCHAR ucMiscInfo; // bit[0]= Force program PPLL
// bit[1]= when VGA timing is used.
// bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
// bit[4]= RefClock source for PPLL.
// =0: XTLAIN( default mode )
// =1: other external clock source, which is pre-defined
// by VBIOS depend on the feature required.
// bit[7:5]: reserved.
ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
 
}PIXEL_CLOCK_PARAMETERS_V6;
 
#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01
#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02
#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
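The V6 call packs the CRTC id and the now-24-bit pixel clock into a single dword. A hedged sketch of building it without the bitfield view; ATOM_CRTC1 == 0 is an assumption based on the usual atombios headers:

#include <stdint.h>

#define ATOM_CRTC1 0 /* assumed value */

/* clock in 10 kHz units in bits 23:0, CRTC id in bits 31:24 */
static uint32_t crtc_pclk_dword(uint8_t crtc, uint32_t pclk_10khz)
{
	return (pclk_10khz & 0xFFFFFFu) | ((uint32_t)crtc << 24);
}

/* crtc_pclk_dword(ATOM_CRTC1, 15400) requests 154.00 MHz on CRTC1 */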
 
typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
{
PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
1241,10 → 1573,11
typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
{
USHORT usPixelClock; // target pixel clock
UCHAR ucTransmitterID; // transmitter id defined in objectid.h
UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h
UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
UCHAR ucReserved[3];
UCHAR ucExtTransmitterID; // external encoder id.
UCHAR ucReserved[2];
}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
 
// usDispPllConfig v1.2 for RoadRunner
1314,7 → 1647,7
typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
{
USHORT usPrescale; //Ratio between Engine clock and I2C clock
USHORT usVRAMAddress; //Adress in Frame Buffer where to place raw EDID
USHORT usVRAMAddress; //Address in Frame Buffer where to place raw EDID
USHORT usStatus; //When used as output: lower byte EDID checksum, high byte hardware status
//When used as input: lower byte as 'byte to read': currently limited to 128byte or 1byte
UCHAR ucSlaveAddr; //Read from which slave
1358,6 → 1691,7
/**************************************************************************/
#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
 
 
/****************************************************************************/
// Structures used by PowerConnectorDetectionTable
/****************************************************************************/
1438,6 → 1772,31
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
 
// Used by DCE5.0
typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
{
USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0
UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
// Bit[1]: 1-Ext. 0-Int.
// Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
// Bits[7:4] reserved
UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00
#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01
#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02
#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c
#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8
 
#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
 
/**************************************************************************/
1706,7 → 2065,7
USHORT StandardVESA_Timing; // Only used by Bios
USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
USHORT DAC_Info; // Will be obsolete from R600
USHORT LVDS_Info; // Shared by various SW components,latest version 1.1
USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
USHORT TMDS_Info; // Will be obsolete from R600
USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
USHORT SupportedDevicesInfo; // Will be obsolete from R600
1736,6 → 2095,9
USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1
}ATOM_MASTER_LIST_OF_DATA_TABLES;
 
// For backward compatible
#define LVDS_Info LCD_Info
 
typedef struct _ATOM_MASTER_DATA_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
1742,6 → 2104,7
ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
}ATOM_MASTER_DATA_TABLE;
 
 
/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
/****************************************************************************/
1776,11 → 2139,12
UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
}ATOM_MULTIMEDIA_CONFIG_INFO;
 
 
/****************************************************************************/
// Structures used in FirmwareInfoTable
/****************************************************************************/
 
// usBIOSCapability Defintion:
// usBIOSCapability Definition:
// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted;
// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
2031,8 → 2395,47
UCHAR ucReserved4[3];
}ATOM_FIRMWARE_INFO_V2_1;
 
//the structure below to be used from NI
//ucTableFormatRevision=2
//ucTableContentRevision=2
typedef struct _ATOM_FIRMWARE_INFO_V2_2
{
ATOM_COMMON_TABLE_HEADER sHeader;
ULONG ulFirmwareRevision;
ULONG ulDefaultEngineClock; //In 10Khz unit
ULONG ulDefaultMemoryClock; //In 10Khz unit
ULONG ulReserved[2];
ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ?
ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.
UCHAR ucReserved3; //Was ucASICMaxTemperature;
UCHAR ucMinAllowedBL_Level;
USHORT usBootUpVDDCVoltage; //In MV unit
USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
ULONG ulReserved4; //Was ulAsicMaximumVoltage
ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
USHORT usMinPixelClockPLL_Input; //In 10Khz unit
USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
USHORT usCoreReferenceClock; //In 10Khz unit
USHORT usMemoryReferenceClock; //In 10Khz unit
USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
UCHAR ucMemoryModule_ID; //Indicate what is the board design
UCHAR ucReserved9[3];
USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
USHORT usReserved12;
ULONG ulReserved10[3]; // New added comparing to previous version
}ATOM_FIRMWARE_INFO_V2_2;
 
#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1
#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
 
/****************************************************************************/
// Structures used in IntegratedSystemInfoTable
2212,7 → 2615,7
ucDockingPinBit: which bit in this register to read the pin status;
ucDockingPinPolarity:Polarity of the pin when docked;
 
ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
 
usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
 
2250,7 → 2653,15
usMinDownStreamHTLinkWidth: same as above.
*/
 
// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition
#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
 
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this define reflects the max defined CPU code
 
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
2778,9 → 3189,89
#define PANEL_RANDOM_DITHER 0x80
#define PANEL_RANDOM_DITHER_MASK 0x80
 
#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this
 
#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
/****************************************************************************/
// Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12
// ASIC Families: NI
// ucTableFormatRevision=1
// ucTableContentRevision=3
/****************************************************************************/
typedef struct _ATOM_LCD_INFO_V13
{
ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_DTD_FORMAT sLCDTiming;
USHORT usExtInfoTableOffset;
USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
ULONG ulReserved0;
UCHAR ucLCD_Misc; // Reorganized in V13
// Bit0: {=0:single, =1:dual},
// Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB},
// Bit3:2: {Grey level}
// Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h)
// Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it?
UCHAR ucPanelDefaultRefreshRate;
UCHAR ucPanelIdentification;
UCHAR ucSS_Id;
USHORT usLCDVenderID;
USHORT usLCDProductID;
UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13
// Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
// Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
// Bit2: a quick reference whether an embedded panel (LCD1 ) is LVDS (0) or eDP (1)
// Bit7-3: Reserved
UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
USHORT usBacklightPWM; // Backlight PWM in Hz. New in _V13
 
UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
 
UCHAR ucOffDelay_in4Ms;
UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
UCHAR ucReserved1;
 
ULONG ulReserved[4];
}ATOM_LCD_INFO_V13;
 
#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
 
//Definitions for ucLCD_Misc
#define ATOM_PANEL_MISC_V13_DUAL 0x00000001
#define ATOM_PANEL_MISC_V13_FPDI 0x00000002
#define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C
#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2
#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70
#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10
#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20
 
//Color Bit Depth definition in EDID V1.4 @BYTE 14h
//Bit 6 5 4
// 0 0 0 - Color bit depth is undefined
// 0 0 1 - 6 Bits per Primary Color
// 0 1 0 - 8 Bits per Primary Color
// 0 1 1 - 10 Bits per Primary Color
// 1 0 0 - 12 Bits per Primary Color
// 1 0 1 - 14 Bits per Primary Color
// 1 1 0 - 16 Bits per Primary Color
// 1 1 1 - Reserved
//Definitions for ucLCDPanel_SpecialHandlingCap:
 
//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID no change comparing to previous version
 
//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static
//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12
#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED no change comparing to previous version
 
//Use this cap bit for a quick reference whether an embedded panel (LCD1 ) is LVDS or eDP.
#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change comparing to previous version
 
typedef struct _ATOM_PATCH_RECORD_MODE
{
UCHAR ucRecordType;
2868,7 → 3359,7
/****************************************************************************/
// Structure used in AnalogTV_InfoTable (Top level)
/****************************************************************************/
//ucTVBootUpDefaultStd definiton:
//ucTVBootUpDefaultStd definition:
 
//ATOM_TV_NTSC 1
//ATOM_TV_NTSCJ 2
2912,7 → 3403,7
UCHAR ucTV_BootUpDefaultStandard;
UCHAR ucExt_TV_ASIC_ID;
UCHAR ucExt_TV_ASIC_SlaveAddr;
ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
}ATOM_ANALOG_TV_INFO_V1_2;
 
typedef struct _ATOM_DPCD_INFO
2944,9 → 3435,9
#define MAX_DTD_MODE_IN_VRAM 6
#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
#define DFP_ENCODER_TYPE_OFFSET 0x80
#define DP_ENCODER_LANE_NUM_OFFSET 0x84
#define DP_ENCODER_LINK_RATE_OFFSET 0x88
//20 bytes for Encoder Type and DPCD in STD EDID area
#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)
#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 )
 
#define ATOM_HWICON1_SURFACE_ADDR 0
#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
2999,12 → 3490,14
 
#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
 
#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024)
#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512
 
//The size below is in Kb!
#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
 
#define ATOM_VRAM_RESERVE_V2_SIZE 32
 
#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
3206,6 → 3699,15
USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destined to connector.
}ATOM_DISPLAY_OBJECT_PATH;
 
typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
{
USHORT usDeviceTag; //supported device
USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
USHORT usConnObjectId; //Connector Object ID
USHORT usGPUObjectId; //GPU ID
USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder
}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
 
typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
{
UCHAR ucNumOfDispPath;
3261,6 → 3763,47
#define EXT_AUXDDC_LUTINDEX_7 7
#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
 
//ucChannelMapping are defined as following
//for DP connector, eDP, DP to VGA/LVDS
//Bit[1:0]: Define which pin connect to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
//Bit[3:2]: Define which pin connect to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
//Bit[5:4]: Define which pin connect to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
//Bit[7:6]: Define which pin connect to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
{
#if ATOM_BIG_ENDIAN
UCHAR ucDP_Lane3_Source:2;
UCHAR ucDP_Lane2_Source:2;
UCHAR ucDP_Lane1_Source:2;
UCHAR ucDP_Lane0_Source:2;
#else
UCHAR ucDP_Lane0_Source:2;
UCHAR ucDP_Lane1_Source:2;
UCHAR ucDP_Lane2_Source:2;
UCHAR ucDP_Lane3_Source:2;
#endif
}ATOM_DP_CONN_CHANNEL_MAPPING;
 
//for DVI/HDMI, in dual link case, both links have to have same mapping.
//Bit[1:0]: Define which pin connect to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
//Bit[3:2]: Define which pin connect to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
//Bit[5:4]: Define which pin connect to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
//Bit[7:6]: Define which pin connect to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
{
#if ATOM_BIG_ENDIAN
UCHAR ucDVI_CLK_Source:2;
UCHAR ucDVI_DATA0_Source:2;
UCHAR ucDVI_DATA1_Source:2;
UCHAR ucDVI_DATA2_Source:2;
#else
UCHAR ucDVI_DATA2_Source:2;
UCHAR ucDVI_DATA1_Source:2;
UCHAR ucDVI_DATA0_Source:2;
UCHAR ucDVI_CLK_Source:2;
#endif
}ATOM_DVI_CONN_CHANNEL_MAPPING;
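Decoding a raw ucChannelMapping byte follows directly from the bit assignments documented above. A standalone sketch for the DP case; 0xE4 (binary 11 10 01 00) is the identity mapping, lane N sourced from TX N:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t map = 0xE4; /* lane3..lane0 from TX3..TX0 */
	int lane;

	for (lane = 0; lane < 4; lane++)
		printf("DP_Lane%d <- GPU TX%d\n", lane, (map >> (2 * lane)) & 3);
	return 0;
}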
 
typedef struct _EXT_DISPLAY_PATH
{
USHORT usDeviceTag; //A bit vector to show what devices are supported
3269,7 → 3812,13
UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
USHORT usExtEncoderObjId; //external encoder object id
USHORT usReserved[3];
union{
UCHAR ucChannelMapping; // if ucChannelMapping=0, using default one to one mapping
ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
};
UCHAR ucReserved;
USHORT usReserved[2];
}EXT_DISPLAY_PATH;
#define NUMBER_OF_UCHAR_FOR_GUID 16
3281,10 → 3830,11
UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
UCHAR Reserved [7]; // for potential expansion
UCHAR uc3DStereoPinId; // use for eDP panel
UCHAR Reserved [6]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
 
//Related definitions, all records are differnt but they have a commond header
//Related definitions, all records are different but they have a common header
typedef struct _ATOM_COMMON_RECORD_HEADER
{
UCHAR ucRecordType; //An enum to indicate the record type
3311,10 → 3861,11
#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table
#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
#define ATOM_ENCODER_CAP_RECORD_TYPE 20
 
 
//Must be updated when new record type is added,equal to that record definition!
#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
 
typedef struct _ATOM_I2C_RECORD
{
3441,6 → 3992,26
UCHAR ucPadding[2];
}ATOM_ENCODER_DVO_CF_RECORD;
 
// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
 
typedef struct _ATOM_ENCODER_CAP_RECORD
{
ATOM_COMMON_RECORD_HEADER sheader;
union {
USHORT usEncoderCap;
struct {
#if ATOM_BIG_ENDIAN
USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
#else
USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
#endif
};
};
}ATOM_ENCODER_CAP_RECORD;
 
// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
3580,6 → 4151,11
#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
#define VOLTAGE_CONTROL_ID_DS4402 0x04
#define VOLTAGE_CONTROL_ID_UP6266 0x05
#define VOLTAGE_CONTROL_ID_SCORPIO 0x06
#define VOLTAGE_CONTROL_ID_VT1556M 0x07
#define VOLTAGE_CONTROL_ID_CHL822x 0x08
#define VOLTAGE_CONTROL_ID_VT1586M 0x09
 
typedef struct _ATOM_VOLTAGE_OBJECT
{
3670,6 → 4246,23
#define POWER_SENSOR_GPIO 0x01
#define POWER_SENSOR_I2C 0x02
 
typedef struct _ATOM_CLK_VOLT_CAPABILITY
{
ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table
ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
}ATOM_CLK_VOLT_CAPABILITY;
 
typedef struct _ATOM_AVAILABLE_SCLK_LIST
{
ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK
USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK
}ATOM_AVAILABLE_SCLK_LIST;
 
// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0]
 
// this IntegratedSystemInfoTable is used for Liano/Ontario APU
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
{
ATOM_COMMON_TABLE_HEADER sHeader;
3676,60 → 4269,134
ULONG ulBootUpEngineClock;
ULONG ulDentistVCOFreq;
ULONG ulBootUpUMAClock;
ULONG ulReserved1[8];
ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
ULONG ulBootUpReqDisplayVector;
ULONG ulOtherDisplayMisc;
ULONG ulGPUCapInfo;
ULONG ulReserved2[3];
ULONG ulSB_MMIO_Base_Addr;
USHORT usRequestedPWMFreqInHz;
UCHAR ucHtcTmpLmt;
UCHAR ucHtcHystLmt;
ULONG ulMinEngineClock;
ULONG ulSystemConfig;
ULONG ulCPUCapInfo;
USHORT usMaxNBVoltage;
USHORT usMinNBVoltage;
USHORT usNBP0Voltage;
USHORT usNBP1Voltage;
USHORT usBootUpNBVoltage;
USHORT usExtDispConnInfoOffset;
UCHAR ucHtcTmpLmt;
UCHAR ucTjOffset;
USHORT usPanelRefreshRateRange;
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
ULONG ulCSR_M3_ARB_CNTL_UVD[10];
ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
ULONG ulReserved3[42];
ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
ULONG ulGMCRestoreResetTime;
ULONG ulMinimumNClk;
ULONG ulIdleNClk;
ULONG ulDDR_DLL_PowerUpTime;
ULONG ulDDR_PLL_PowerUpTime;
USHORT usPCIEClkSSPercentage;
USHORT usPCIEClkSSType;
USHORT usLvdsSSPercentage;
USHORT usLvdsSSpreadRateIn10Hz;
USHORT usHDMISSPercentage;
USHORT usHDMISSpreadRateIn10Hz;
USHORT usDVISSPercentage;
USHORT usDVISSpreadRateIn10Hz;
ULONG ulReserved3[21];
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V6;
 
// ulGPUCapInfo
#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
 
// ulOtherDisplayMisc
#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
 
 
/**********************************************************************************************************************
// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit.
//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
//ulReserved1[8] Reserved by now, must be 0x0.
//ulBootUpReqDisplayVector VBIOS boot up display IDs
// ATOM_DEVICE_CRT1_SUPPORT 0x0001
// ATOM_DEVICE_CRT2_SUPPORT 0x0010
// ATOM_DEVICE_DFP1_SUPPORT 0x0008
// ATOM_DEVICE_DFP6_SUPPORT 0x0040
// ATOM_DEVICE_DFP2_SUPPORT 0x0080
// ATOM_DEVICE_DFP3_SUPPORT 0x0200
// ATOM_DEVICE_DFP4_SUPPORT 0x0400
// ATOM_DEVICE_DFP5_SUPPORT 0x0800
// ATOM_DEVICE_LCD1_SUPPORT 0x0002
//ulOtherDisplayMisc Other display related flags, not defined yet.
//ulGPUCapInfo TBD
//ulReserved2[3] must be 0x0 for the reserved.
//ulSystemConfig TBD
//ulCPUCapInfo TBD
//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
//usBootUpNBVoltage Boot up NB voltage in unit of mv.
//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
//ucUMAChannelNumber System memory channel numbers.
//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table.
//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default
//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock
ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
sDISPCLK_Voltage: Report Display clock voltage requirement.
ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Liano/Ontario projects:
ATOM_DEVICE_CRT1_SUPPORT 0x0001
ATOM_DEVICE_CRT2_SUPPORT 0x0010
ATOM_DEVICE_DFP1_SUPPORT 0x0008
ATOM_DEVICE_DFP6_SUPPORT 0x0040
ATOM_DEVICE_DFP2_SUPPORT 0x0080
ATOM_DEVICE_DFP3_SUPPORT 0x0200
ATOM_DEVICE_DFP4_SUPPORT 0x0400
ATOM_DEVICE_DFP5_SUPPORT 0x0800
ATOM_DEVICE_LCD1_SUPPORT 0x0002
ulOtherDisplayMisc: Other display related flags, not defined yet.
ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode.
=1: TMDS/HDMI Coherent Mode use single PLL mode.
bit[3]=0: Enable HW AUX mode detection logic
=1: Disable HW AUX mode detection logic
ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
 
usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
1. SW uses the GPU BL PWM output to control the BL, in this case, this non-zero frequency determines what freq GPU should use;
VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
Changing BL using VBIOS function is functional in both driver and non-driver present environment;
and enabling VariBri under the driver environment from PP table is optional.
 
2. SW uses other means to control BL (like DPCD), this non-zero frequency serves as a flag only indicating
that BL control from GPU is expected.
VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
it's per platform
and enabling VariBri under the driver environment from PP table is optional.
 
ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
Threshold on value to enter HTC_active state.
ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
To calculate threshold off value to exit HTC_active state, which is Threshold on value minus ucHtcHystLmt.
ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
=1: PCIE Power Gating Enabled
Bit[1]=0: DDR-DLL shut-down feature disabled.
1: DDR-DLL shut-down feature enabled.
Bit[2]=0: DDR-PLL Power down feature disabled.
1: DDR-PLL Power down feature enabled.
ulCPUCapInfo: TBD
usNBP0Voltage: VID for voltage on NB P0 State
usNBP1Voltage: VID for voltage on NB P1 State
usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
to indicate a range.
SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
ucUMAChannelNumber: System memory channel numbers.
ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
sAvail_SCLK[5]: Arrays to provide available list of SLCK and corresponding voltage, order from low to high
ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
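For example, a value of 250 in any of the SS percentage fields above encodes 250 * 0.01% = 2.50% spread.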
**********************************************************************************************************************/
 
/**************************************************************************/
3780,7 → 4447,7
UCHAR ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT;
 
//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type.
//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
//SS is not required or enabled if a match is not found.
#define ASIC_INTERNAL_MEMORY_SS 1
#define ASIC_INTERNAL_ENGINE_SS 2
3790,6 → 4457,7
#define ASIC_INTERNAL_SS_ON_LVDS 6
#define ASIC_INTERNAL_SS_ON_DP 7
#define ASIC_INTERNAL_SS_ON_DCPLL 8
#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
 
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
{
3903,8 → 4571,9
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
 
//Byte aligned defintion for BIOS usage
//Byte aligned definition for BIOS usage
#define ATOM_S0_CRT1_MONOb0 0x01
#define ATOM_S0_CRT1_COLORb0 0x02
#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
3970,7 → 4639,7
#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
 
 
//Byte aligned defintion for BIOS usage
//Byte aligned definition for BIOS usage
#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01
4020,7 → 4689,7
#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
 
//Byte aligned defintion for BIOS usage
//Byte aligned definition for BIOS usage
#define ATOM_S3_CRT1_ACTIVEb0 0x01
#define ATOM_S3_LCD1_ACTIVEb0 0x02
#define ATOM_S3_TV1_ACTIVEb0 0x04
4056,7 → 4725,7
#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
#define ATOM_S4_LCD1_REFRESH_SHIFT 8
 
//Byte aligned defintion for BIOS usage
//Byte aligned definition for BIOS usage
#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
4135,7 → 4804,7
#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
 
//Byte aligned defintion for BIOS usage
//Byte aligned definition for BIOS usage
#define ATOM_S6_DEVICE_CHANGEb0 0x01
#define ATOM_S6_SCALER_CHANGEb0 0x02
#define ATOM_S6_LID_CHANGEb0 0x04
4376,7 → 5045,7
 
typedef struct _MEMORY_CLEAN_UP_PARAMETERS
{
USHORT usMemoryStart; //in 8Kb boundry, offset from memory base address
USHORT usMemoryStart; //in 8Kb boundary, offset from memory base address
USHORT usMemorySize; //8Kb blocks aligned
}MEMORY_CLEAN_UP_PARAMETERS;
#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
4529,8 → 5198,9
#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code
#define ACCESS_PLACEHOLDER 0x80
 
 
typedef struct _ATOM_MC_INIT_PARAM_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
4554,6 → 5224,10
#define _32Mx32 0x33
#define _64Mx8 0x41
#define _64Mx16 0x42
#define _64Mx32 0x43
#define _128Mx8 0x51
#define _128Mx16 0x52
#define _256Mx8 0x61
 
#define SAMSUNG 0x1
#define INFINEON 0x2
4569,10 → 5243,11
#define QIMONDA INFINEON
#define PROMOS MOSEL
#define KRETON INFINEON
#define ELIXIR NANYA
 
/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
 
#define UCODE_ROM_START_ADDRESS 0x1c000
#define UCODE_ROM_START_ADDRESS 0x1b800
#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
 
//uCode block header for reference
4903,8 → 5578,35
ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
}ATOM_VRAM_MODULE_V6;
 
typedef struct _ATOM_VRAM_MODULE_V7
{
// Design Specific Values
ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
USHORT usReserved;
UCHAR ucExtMemoryID; // Current memory module ID
UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
UCHAR ucChannelNum; // Number of mem. channels supported in this module
UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
UCHAR ucMisc; // RANK_OF_THISMEMORY etc.
UCHAR ucVREFI; // Not used.
UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
UCHAR ucReserved[3];
// Memory Module specific values
USHORT usEMRS2Value; // EMRS2/MR2 Value.
USHORT usEMRS3Value; // EMRS3/MR3 Value.
UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code
UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection; here it is hardcoded per memory part
UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7]=Write CDR bandwidth
char strMemPNString[20]; // part number string, terminated with '\0'.
}ATOM_VRAM_MODULE_V7;
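/* A minimal sketch (not part of the VBIOS interface; helper name is
 * illustrative) of decoding the packed fields above:
 */
static void vram_module_v7_decode(const ATOM_VRAM_MODULE_V7 *m,
				  UCHAR *vendor, UCHAR *revision, ULONG *size_mb)
{
	*vendor   = m->ucMemoryVenderID & 0x0f;		/* [3:0] vendor code */
	*revision = (m->ucMemoryVenderID >> 4) & 0x0f;	/* [7:4] revision */
	*size_mb  = (ULONG)m->ucMemorySize * 16;	/* stored in 16MB units */
}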
 
 
typedef struct _ATOM_VRAM_INFO_V2
{
ATOM_COMMON_TABLE_HEADER sHeader;
4942,6 → 5644,20
// ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V4;
 
typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
{
ATOM_COMMON_TABLE_HEADER sHeader;
USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
USHORT usReserved[4];
UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version
UCHAR ucReserved;
ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
}ATOM_VRAM_INFO_HEADER_V2_1;
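/* Modules can be variable-sized, so a reader should step by usModuleSize
 * rather than by sizeof(ATOM_VRAM_MODULE_V7); a sketch under that assumption
 * (helper name is illustrative):
 */
static const ATOM_VRAM_MODULE_V7 *
vram_module_at(const ATOM_VRAM_INFO_HEADER_V2_1 *hdr, UCHAR idx)
{
	const UCHAR *p = (const UCHAR *)hdr->aVramInfo;
	UCHAR i;

	for (i = 0; i < idx && i < hdr->ucNumOfVRAMModule; i++)
		p += ((const ATOM_VRAM_MODULE_V7 *)p)->usModuleSize;
	return (const ATOM_VRAM_MODULE_V7 *)p;
}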
 
 
typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
{
ATOM_COMMON_TABLE_HEADER sHeader;
4999,7 → 5715,7
#define SW_I2C_CNTL_WRITE1BIT 6
 
//==============================VESA definition Portion===============================
#define VESA_OEM_PRODUCT_REV '01.00'
#define VESA_OEM_PRODUCT_REV "01.00"
#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE 7
#define VESA_WIN_SIZE 64
5182,6 → 5898,16
UCHAR ucReserved;
}ASIC_TRANSMITTER_INFO;
 
#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01
#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02
#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4
#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00
#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04
#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40
#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44
#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80
#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84
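/* The encoder object ID is spread across bits [7:6] and bit 2 of ucConfig,
 * which is why the mask is 0xc4; a minimal sketch of selecting on it
 * (helper name is illustrative):
 */
static int transmitter_encoder_sel(UCHAR config)
{
	switch (config & ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK) {
	case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A: return 0;
	case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B: return 1;
	case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C: return 2;
	case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D: return 3;
	case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E: return 4;
	default:                                      return 5; /* ENCODER_F */
	}
}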
 
typedef struct _ASIC_ENCODER_INFO
{
UCHAR ucEncoderID;
5284,6 → 6010,28
/* /obsolete */
#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
 
 
typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
{
USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
UCHAR ucAuxId;
UCHAR ucAction;
UCHAR ucSinkType; // Input and output parameter.
UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
UCHAR ucReserved[2];
}DP_ENCODER_SERVICE_PARAMETERS_V2;
 
typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
{
DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
 
// ucAction
#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01
#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02
 
 
// DP_TRAINING_TABLE
#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
5339,6 → 6087,7
#define SELECT_DCIO_IMPCAL 4
#define SELECT_DCIO_DIG 6
#define SELECT_CRTC_PIXEL_RATE 7
#define SELECT_VGA_BLK 8
 
/****************************************************************************/
//Portion VI: Definitinos for vbios MC scratch registers that driver used
5742,7 → 6491,20
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
#define ATOM_PP_THERMALCONTROLLER_RV770 8
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
 
// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
// We probably should reserve the bit 0x80 for this use.
// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
// The driver can pick the correct internal controller based on the ASIC.
 
#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
 
typedef struct _ATOM_PPLIB_STATE
{
UCHAR ucNonClockStateIndex;
5749,6 → 6511,26
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
 
typedef struct _ATOM_PPLIB_FANTABLE
{
UCHAR ucFanTableFormat; // Change this if the table format or version changes so that the other fields no longer mean the same thing.
UCHAR ucTHyst; // Temperature hysteresis. Integer.
USHORT usTMin; // The temperature, in units of 0.01 degrees C, below which we just run at a minimal PWM.
USHORT usTMed; // The middle temperature where we change slopes.
USHORT usTHigh; // The high point above TMed for adjusting the second slope.
USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
USHORT usPWMMed; // The PWM value (in percent) at TMed.
USHORT usPWMHigh; // The PWM value at THigh.
} ATOM_PPLIB_FANTABLE;
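/* A minimal sketch (not part of the VBIOS interface; helper name is
 * illustrative) of evaluating the two-slope curve described above;
 * temperatures in 0.01 degC, PWM in 0.01% (assumes TMin < TMed < THigh):
 */
static USHORT pplib_fan_pwm_from_temp(const ATOM_PPLIB_FANTABLE *t, USHORT temp)
{
	if (temp <= t->usTMin)
		return t->usPWMMin;
	if (temp <= t->usTMed)	/* first slope: TMin..TMed */
		return t->usPWMMin + (USHORT)((ULONG)(t->usPWMMed - t->usPWMMin) *
				      (temp - t->usTMin) / (t->usTMed - t->usTMin));
	if (temp <= t->usTHigh)	/* second slope: TMed..THigh */
		return t->usPWMMed + (USHORT)((ULONG)(t->usPWMHigh - t->usPWMMed) *
				      (temp - t->usTMed) / (t->usTHigh - t->usTMed));
	return t->usPWMHigh;
}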
 
typedef struct _ATOM_PPLIB_EXTENDEDHEADER
{
USHORT usSize;
ULONG ulMaxEngineClock; // For Overdrive.
ULONG ulMaxMemoryClock; // For Overdrive.
// Add extra system parameters here, always adjust size to include all fields.
} ATOM_PPLIB_EXTENDEDHEADER;
 
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
5762,6 → 6544,12
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Whether the driver controls VDDCI independently of VDDC.
#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Whether the driver supports the BACO state.
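/* A sketch of testing a platform cap; the caps word comes from
 * ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps (defined below):
 */
static int pp_cap_set(ULONG platform_caps, ULONG cap)
{
	return (platform_caps & cap) != 0;	/* e.g. cap = ATOM_PP_PLATFORM_CAP_BACO */
}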
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
5797,6 → 6585,44
 
} ATOM_PPLIB_POWERPLAYTABLE;
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
{
ATOM_PPLIB_POWERPLAYTABLE basicTable;
UCHAR ucNumCustomThermalPolicy;
USHORT usCustomThermalPolicyArrayOffset;
}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
{
ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
USHORT usFormatID; // To be used ONLY by PPGen.
USHORT usFanTableOffset;
USHORT usExtendendedHeaderOffset;
} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
{
ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
ULONG ulGoldenPPID; // PPGen use only
ULONG ulGoldenRevision; // PPGen use only
USHORT usVddcDependencyOnSCLKOffset;
USHORT usVddciDependencyOnMCLKOffset;
USHORT usVddcDependencyOnMCLKOffset;
USHORT usMaxClockVoltageOnDCOffset;
USHORT usReserved[2];
} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
{
ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
ULONG ulTDPLimit;
ULONG ulNearTDPLimit;
ULONG ulSQRampingThreshold;
USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed.
ULONG ulReserved;
} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
 
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
5816,8 → 6642,14
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
// remaining 3 bits are reserved
#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
 
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
 
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
5840,13 → 6672,31
 
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
 
#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
//memory related flags
#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
 
//M3 Arb //2bits, current 3 sets of parameters in total
#define ATOM_PPLIB_M3ARB_MASK 0x00060000
#define ATOM_PPLIB_M3ARB_SHIFT 17
 
#define ATOM_PPLIB_ENABLE_DRR 0x00080000
 
// remaining 16 bits are reserved
typedef struct _ATOM_PPLIB_THERMAL_STATE
{
UCHAR ucMinTemperature;
UCHAR ucMaxTemperature;
UCHAR ucThermalAction;
}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
 
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
typedef struct _ATOM_PPLIB_NONCLOCK_INFO
{
USHORT usClassification;
5854,7 → 6704,10
UCHAR ucMaxTemperature;
ULONG ulCapsAndSettings;
UCHAR ucRequiredPower;
UCHAR ucUnused1[3];
USHORT usClassification2;
ULONG ulVCLK;
ULONG ulDCLK;
UCHAR ucUnused[5];
} ATOM_PPLIB_NONCLOCK_INFO;
 
// Contained in an array starting at the offset
5882,7 → 6735,24
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
 
typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
{
USHORT usEngineClockLow;
UCHAR ucEngineClockHigh;
 
USHORT usMemoryClockLow;
UCHAR ucMemoryClockHigh;
 
USHORT usVDDC;
USHORT usVDDCI;
USHORT usUnused;
 
ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
 
} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
 
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 
{
5895,7 → 6765,7
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
5913,10 → 6783,97
#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
 
typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is 10 kHz.
UCHAR ucEngineClockHigh; //clockfrequency >> 16.
UCHAR vddcIndex; //2-bit vddc index;
UCHAR leakage; //please use the 8-bit absolute value, not the 6-bit % value
//please initialize to 0
UCHAR rsv;
//please initialize to 0
USHORT rsv1;
//please initialize to 0s
ULONG rsv2[2];
}ATOM_PPLIB_SUMO_CLOCK_INFO;
 
 
 
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
//size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
UCHAR ucNumDPMLevels;
//an index into the array of nonClockInfos
UCHAR nonClockInfoIndex;
/**
* Driver will read the first ucNumDPMLevels in this array
*/
UCHAR clockInfoIndex[1];
} ATOM_PPLIB_STATE_V2;
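/* ucNumDPMLevels makes this struct variable-sized; a minimal sketch of the
 * size rule stated in the comment above (helper name is illustrative):
 */
static ULONG pplib_state_v2_size(const ATOM_PPLIB_STATE_V2 *s)
{
	return sizeof(ATOM_PPLIB_STATE_V2) +
	       ((ULONG)s->ucNumDPMLevels - 1) * sizeof(UCHAR);
}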
 
typedef struct StateArray{
//how many states we have
UCHAR ucNumEntries;
ATOM_PPLIB_STATE_V2 states[1];
}StateArray;
 
 
typedef struct ClockInfoArray{
//how many clock levels we have
UCHAR ucNumEntries;
//sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
UCHAR ucEntrySize;
//this is for Sumo
ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
}ClockInfoArray;
 
typedef struct NonClockInfoArray{
 
//how many non-clock levels we have. Normally this should be the same as the number of states
UCHAR ucNumEntries;
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
}NonClockInfoArray;
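/* Entry layouts can grow over time (see ATOM_PPLIB_NONCLOCKINFO_VER1/VER2
 * above), so entries should be indexed with ucEntrySize rather than with
 * sizeof(); a minimal sketch, helper name illustrative:
 */
static const ATOM_PPLIB_NONCLOCK_INFO *
non_clock_info_at(const NonClockInfoArray *a, UCHAR i)
{
	return (const ATOM_PPLIB_NONCLOCK_INFO *)
		((const UCHAR *)a->nonClockInfo + (ULONG)i * a->ucEntrySize);
}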
 
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
{
USHORT usClockLow;
UCHAR ucClockHigh;
USHORT usVoltage;
}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
 
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
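/* Clocks in these records are 24-bit values split across usClockLow and
 * ucClockHigh, in units of 10 kHz; a sketch of the reconstruction (assumes
 * the fields have already been byte-swapped to host order where needed):
 */
static ULONG dep_record_clock(const ATOM_PPLIB_Clock_Voltage_Dependency_Record *r)
{
	return ((ULONG)r->ucClockHigh << 16) | r->usClockLow;
}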
 
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
{
USHORT usSclkLow;
UCHAR ucSclkHigh;
USHORT usMclkLow;
UCHAR ucMclkHigh;
USHORT usVddc;
USHORT usVddci;
}ATOM_PPLIB_Clock_Voltage_Limit_Record;
 
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_Clock_Voltage_Limit_Table;
 
/**************************************************************************/
 
 
// Following definitions are for compatiblity issue in different SW components.
// Following definitions are for compatibility issue in different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
#define Object_Info Object_Header
#define AdjustARB_SEQ MC_InitParameter
/drivers/video/drm/radeon/atombios_crtc.c
26,7 → 26,7
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon_fixed.h"
#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
44,19 → 44,14
 
memset(&args, 0, sizeof(args));
 
args.usOverscanRight = 0;
args.usOverscanLeft = 0;
args.usOverscanBottom = 0;
args.usOverscanTop = 0;
args.ucCRTC = radeon_crtc->crtc_id;
 
switch (radeon_crtc->rmx_type) {
case RMX_CENTER:
args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
break;
case RMX_ASPECT:
a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
63,23 → 58,22
a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
 
if (a1 > a2) {
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
} else if (a2 > a1) {
args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
break;
case RMX_FULL:
default:
args.usOverscanRight = 0;
args.usOverscanLeft = 0;
args.usOverscanBottom = 0;
args.usOverscanTop = 0;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
break;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void atombios_scaler_setup(struct drm_crtc *crtc)
245,12 → 239,13
 
switch (mode) {
case DRM_MODE_DPMS_ON:
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
/* XXX re-enable when interrupt support is added */
if (!ASIC_IS_DCE4(rdev))
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
257,13 → 252,15
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
/* XXX re-enable when interrupt support is added */
if (!ASIC_IS_DCE4(rdev))
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
}
}
280,22 → 277,22
u16 misc = 0;
 
memset(&args, 0, sizeof(args));
args.usH_Size = cpu_to_le16(mode->crtc_hdisplay);
args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2));
args.usH_Blanking_Time =
cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay);
args.usV_Size = cpu_to_le16(mode->crtc_vdisplay);
cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2));
args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2));
args.usV_Blanking_Time =
cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay);
cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2));
args.usH_SyncOffset =
cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay);
cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border);
args.usH_SyncWidth =
cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
args.usV_SyncOffset =
cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay);
cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border);
args.usV_SyncWidth =
cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
/*args.ucH_Border = mode->hborder;*/
/*args.ucV_Border = mode->vborder;*/
args.ucH_Border = radeon_crtc->h_border;
args.ucV_Border = radeon_crtc->v_border;
 
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
misc |= ATOM_VSYNC_POLARITY;
336,6 → 333,11
args.usV_SyncWidth =
cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
 
args.ucOverscanRight = radeon_crtc->h_border;
args.ucOverscanLeft = radeon_crtc->h_border;
args.ucOverscanBottom = radeon_crtc->v_border;
args.ucOverscanTop = radeon_crtc->v_border;
 
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
misc |= ATOM_VSYNC_POLARITY;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
353,67 → 355,151
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void atombios_disable_ss(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
u32 ss_cntl;
 
if (ASIC_IS_DCE4(rdev)) {
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
break;
case ATOM_PPLL2:
ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
return;
}
} else if (ASIC_IS_AVIVO(rdev)) {
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
ss_cntl &= ~1;
WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
break;
case ATOM_PPLL2:
ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
ss_cntl &= ~1;
WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
return;
}
}
}
 
 
union atom_enable_ss {
ENABLE_LVDS_SS_PARAMETERS legacy;
ENABLE_LVDS_SS_PARAMETERS lvds_ss;
ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};
 
static void atombios_set_ss(struct drm_crtc *crtc, int enable)
static void atombios_crtc_program_ss(struct drm_crtc *crtc,
int enable,
int pll_id,
struct radeon_atom_ss *ss)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
struct radeon_encoder_atom_dig *dig = NULL;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
uint16_t percentage = 0;
uint8_t type = 0, step = 0, delay = 0, range = 0;
 
/* XXX add ss support for DCE4 */
if (ASIC_IS_DCE4(rdev))
return;
memset(&args, 0, sizeof(args));
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
/* only enable spread spectrum on LVDS */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
dig = radeon_encoder->enc_priv;
if (dig && dig->ss) {
percentage = dig->ss->percentage;
type = dig->ss->type;
step = dig->ss->step;
delay = dig->ss->delay;
range = dig->ss->range;
} else if (enable)
if (ASIC_IS_DCE5(rdev)) {
args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
switch (pll_id) {
case ATOM_PPLL1:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
} else if (enable)
}
args.v3.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v3.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
switch (pll_id) {
case ATOM_PPLL1:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
break;
}
args.v2.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
args.v1.ucSpreadSpectrumStep = ss->step;
args.v1.ucSpreadSpectrumDelay = ss->delay;
args.v1.ucSpreadSpectrumRange = ss->range;
args.v1.ucPpll = pll_id;
args.v1.ucEnable = enable;
} else if (ASIC_IS_AVIVO(rdev)) {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
atombios_disable_ss(crtc);
return;
}
 
if (!radeon_encoder)
args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
args.lvds_ss_2.ucEnable = enable;
} else {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
atombios_disable_ss(crtc);
return;
 
memset(&args, 0, sizeof(args));
if (ASIC_IS_AVIVO(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
args.v1.ucSpreadSpectrumType = type;
args.v1.ucSpreadSpectrumStep = step;
args.v1.ucSpreadSpectrumDelay = delay;
args.v1.ucSpreadSpectrumRange = range;
args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
args.v1.ucEnable = enable;
} else {
args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
args.legacy.ucSpreadSpectrumType = type;
args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
args.legacy.ucEnable = enable;
}
args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
args.lvds_ss.ucEnable = enable;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
424,36 → 510,28
 
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct radeon_pll *pll)
struct radeon_pll *pll,
bool ss_enabled,
struct radeon_atom_ss *ss)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
struct drm_connector *connector = NULL;
u32 adjusted_clock = mode->clock;
int encoder_mode = 0;
u32 dp_clock = mode->clock;
int bpc = 8;
 
/* reset the pll flags */
pll->flags = 0;
 
/* select the PLL algo */
if (ASIC_IS_AVIVO(rdev)) {
if (radeon_new_pll == 0)
pll->algo = PLL_ALGO_LEGACY;
else
pll->algo = PLL_ALGO_NEW;
} else {
if (radeon_new_pll == 1)
pll->algo = PLL_ALGO_NEW;
else
pll->algo = PLL_ALGO_LEGACY;
}
 
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
RADEON_PLL_PREFER_CLOSEST_LOWER);
 
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
460,6 → 538,9
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
if (rdev->family < CHIP_RV770)
pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
} else {
pll->flags |= RADEON_PLL_LEGACY;
 
467,22 → 548,46
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
if (connector)
bpc = connector->display_info.bpc;
encoder_mode = atombios_get_encoder_mode(encoder);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
radeon_encoder_is_dp_bridge(encoder)) {
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
}
}
 
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
if (ss->refdiv) {
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
if (ASIC_IS_AVIVO(rdev))
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
}
 
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
/* LVDS PLL quirks */
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
pll->algo = dig->pll_algo;
}
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
pll->flags |= RADEON_PLL_IS_LCD;
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
503,8 → 608,9
int index;
 
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev))
return adjusted_clock;
 
memset(&args, 0, sizeof(args));
 
516,6 → 622,9
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
if (ss_enabled && ss->percentage)
args.v1.ucConfig |=
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
 
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
526,13 → 635,23
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
args.v3.sInput.ucEncodeMode = encoder_mode;
args.v3.sInput.ucDispPllConfig = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (ss_enabled && ss->percentage)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_SS_ENABLE;
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) ||
radeon_encoder_is_dp_bridge(encoder)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
if (encoder_mode == ATOM_ENCODER_MODE_DP)
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
else {
/* 16200 or 27000 */
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else {
if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
/* deep color support */
args.v3.sInput.usPixelClock =
cpu_to_le16((mode->clock * bpc / 8) / 10);
}
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
541,21 → 660,27
DISPPLL_CONFIG_DUAL_LINK;
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
/* may want to enable SS on DP/eDP eventually */
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_SS_ENABLE;
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
}
}
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = args.v3.sOutput.ucRefDiv;
}
if (args.v3.sOutput.ucPostDiv) {
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_POST_DIV;
pll->post_div = args.v3.sOutput.ucPostDiv;
}
579,9 → 704,14
PIXEL_CLOCK_PARAMETERS_V2 v2;
PIXEL_CLOCK_PARAMETERS_V3 v3;
PIXEL_CLOCK_PARAMETERS_V5 v5;
PIXEL_CLOCK_PARAMETERS_V6 v6;
};
 
static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
/* on DCE5, make sure the voltage is high enough to support the
* required disp clk.
*/
static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
u32 dispclk)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
592,8 → 722,9
memset(&args, 0, sizeof(args));
 
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev))
return;
 
switch (frev) {
case 1:
603,9 → 734,16
* SetPixelClock provides the dividers
*/
args.v5.ucCRTC = ATOM_CRTC_INVALID;
args.v5.usPixelClock = rdev->clock.default_dispclk;
args.v5.usPixelClock = cpu_to_le16(dispclk);
args.v5.ucPpll = ATOM_DCPLL;
break;
case 6:
/* if the default dcpll clock is specified,
* SetPixelClock provides the dividers
*/
args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
args.v6.ucPpll = ATOM_DCPLL;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
618,105 → 756,123
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
static void atombios_crtc_program_pll(struct drm_crtc *crtc,
int crtc_id,
int pll_id,
u32 encoder_mode,
u32 encoder_id,
u32 clock,
u32 ref_div,
u32 fb_div,
u32 frac_fb_div,
u32 post_div,
int bpc,
bool ss_enabled,
struct radeon_atom_ss *ss)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
u8 frev, crev;
int index;
int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
union set_pixel_clock args;
u32 pll_clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
u32 adjusted_clock;
int encoder_mode = 0;
 
memset(&args, 0, sizeof(args));
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
encoder_mode = atombios_get_encoder_mode(encoder);
break;
}
}
 
if (!radeon_encoder)
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev))
return;
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
break;
case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
pll = &rdev->clock.dcpll;
break;
}
 
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
 
radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
 
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
 
switch (frev) {
case 1:
switch (crev) {
case 1:
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
if (clock == ATOM_DISABLE)
return;
args.v1.usPixelClock = cpu_to_le16(clock / 10);
args.v1.usRefDiv = cpu_to_le16(ref_div);
args.v1.usFbDiv = cpu_to_le16(fb_div);
args.v1.ucFracFbDiv = frac_fb_div;
args.v1.ucPostDiv = post_div;
args.v1.ucPpll = radeon_crtc->pll_id;
args.v1.ucCRTC = radeon_crtc->crtc_id;
args.v1.ucPpll = pll_id;
args.v1.ucCRTC = crtc_id;
args.v1.ucRefDivSrc = 1;
break;
case 2:
args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v2.usPixelClock = cpu_to_le16(clock / 10);
args.v2.usRefDiv = cpu_to_le16(ref_div);
args.v2.usFbDiv = cpu_to_le16(fb_div);
args.v2.ucFracFbDiv = frac_fb_div;
args.v2.ucPostDiv = post_div;
args.v2.ucPpll = radeon_crtc->pll_id;
args.v2.ucCRTC = radeon_crtc->crtc_id;
args.v2.ucPpll = pll_id;
args.v2.ucCRTC = crtc_id;
args.v2.ucRefDivSrc = 1;
break;
case 3:
args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v3.usPixelClock = cpu_to_le16(clock / 10);
args.v3.usRefDiv = cpu_to_le16(ref_div);
args.v3.usFbDiv = cpu_to_le16(fb_div);
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
args.v3.ucPpll = radeon_crtc->pll_id;
args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
args.v3.ucTransmitterId = radeon_encoder->encoder_id;
args.v3.ucPpll = pll_id;
args.v3.ucMiscInfo = (pll_id << 2);
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
args.v3.ucTransmitterId = encoder_id;
args.v3.ucEncoderMode = encoder_mode;
break;
case 5:
args.v5.ucCRTC = radeon_crtc->crtc_id;
args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v5.ucCRTC = crtc_id;
args.v5.usPixelClock = cpu_to_le16(clock / 10);
args.v5.ucRefDiv = ref_div;
args.v5.usFbDiv = cpu_to_le16(fb_div);
args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
args.v5.ucPostDiv = post_div;
args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
args.v5.ucTransmitterID = radeon_encoder->encoder_id;
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
switch (bpc) {
case 8:
default:
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
break;
case 10:
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
break;
}
args.v5.ucTransmitterID = encoder_id;
args.v5.ucEncoderMode = encoder_mode;
args.v5.ucPpll = radeon_crtc->pll_id;
args.v5.ucPpll = pll_id;
break;
case 6:
args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
args.v6.ucRefDiv = ref_div;
args.v6.usFbDiv = cpu_to_le16(fb_div);
args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
args.v6.ucPostDiv = post_div;
args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
switch (bpc) {
case 8:
default:
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
break;
case 10:
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
break;
case 12:
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
break;
case 16:
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
break;
}
args.v6.ucTransmitterID = encoder_id;
args.v6.ucEncoderMode = encoder_mode;
args.v6.ucPpll = pll_id;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
729,42 → 885,207
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
u32 pll_clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
u32 adjusted_clock;
int encoder_mode = 0;
struct radeon_atom_ss ss;
bool ss_enabled = false;
int bpc = 8;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
encoder_mode = atombios_get_encoder_mode(encoder);
break;
}
}
 
if (!radeon_encoder)
return;
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
break;
case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
default:
pll = &rdev->clock.dcpll;
break;
}
 
if (radeon_encoder->active_device &
(ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector =
to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
bpc = connector->display_info.bpc;
 
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP:
/* DP/eDP */
dp_clock = dig_connector->dp_clock / 10;
if (ASIC_IS_DCE4(rdev))
ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_DP,
dp_clock);
else {
if (dp_clock == 16200) {
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
ATOM_DP_SS_ID2);
if (!ss_enabled)
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
ATOM_DP_SS_ID1);
} else
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
ATOM_DP_SS_ID1);
}
break;
case ATOM_ENCODER_MODE_LVDS:
if (ASIC_IS_DCE4(rdev))
ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
dig->lcd_ss_id,
mode->clock / 10);
else
ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss,
dig->lcd_ss_id);
break;
case ATOM_ENCODER_MODE_DVI:
if (ASIC_IS_DCE4(rdev))
ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_TMDS,
mode->clock / 10);
break;
case ATOM_ENCODER_MODE_HDMI:
if (ASIC_IS_DCE4(rdev))
ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_HDMI,
mode->clock / 10);
break;
default:
break;
}
}
 
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
 
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
/* TV seems to prefer the legacy algo on some boards */
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
else if (ASIC_IS_AVIVO(rdev))
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
else
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
 
atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
 
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss);
 
if (ss_enabled) {
/* calculate ss amount and step size */
if (ASIC_IS_DCE4(rdev)) {
u32 step_size;
u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
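/* ss.percentage is in 0.01% units, so dividing by 10000 yields a
 * fraction; e.g. fb_div=100, frac_fb_div=5, percentage=250 (2.5%)
 * gives amount = (1005 * 250) / 10000 = 25, i.e. 2.5 fb_div steps
 * expressed in tenths.
 */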
ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
else
step_size = (2 * amount * ref_div * (ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
ss.step = step_size;
}
 
atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
}
}
 
static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_framebuffer *radeon_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
 
/* no fb bound */
if (!crtc->fb) {
DRM_DEBUG("No FB bound\n");
if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
 
if (atomic) {
radeon_fb = to_radeon_framebuffer(fb);
target_fb = fb;
}
else {
radeon_fb = to_radeon_framebuffer(crtc->fb);
target_fb = crtc->fb;
}
 
/* Pin framebuffer & get tiling information */
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
obj = radeon_fb->obj;
rbo = obj->driver_private;
rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
 
if (atomic)
fb_location = radeon_bo_gpu_offset(rbo);
else {
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
return -EINVAL;
}
}
 
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
 
switch (crtc->fb->bits_per_pixel) {
switch (target_fb->bits_per_pixel) {
case 8:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
776,18 → 1097,29
case 16:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
break;
case 24:
case 32:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
break;
default:
DRM_ERROR("Unsupported screen depth %d\n",
crtc->fb->bits_per_pixel);
target_fb->bits_per_pixel);
return -EINVAL;
}
 
if (tiling_flags & RADEON_TILING_MACRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(AVIVO_D1VGA_CONTROL, 0);
820,15 → 1152,16
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
838,18 → 1171,23
y &= ~1;
WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
(x << 16) | y);
viewport_w = crtc->mode.hdisplay;
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
(viewport_w << 16) | viewport_h);
 
if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
EVERGREEN_INTERLEAVE_EN);
else
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
rbo = radeon_fb->obj->driver_private;
/* set pageflip to happen anywhere in vblank interval */
WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
863,8 → 1201,9
return 0;
}
 
static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
872,33 → 1211,50
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
struct drm_framebuffer *target_fb;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
u32 tmp, viewport_w, viewport_h;
int r;
 
/* no fb bound */
if (!crtc->fb) {
DRM_DEBUG("No FB bound\n");
if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
 
if (atomic) {
radeon_fb = to_radeon_framebuffer(fb);
target_fb = fb;
}
else {
radeon_fb = to_radeon_framebuffer(crtc->fb);
target_fb = crtc->fb;
}
 
/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
rbo = obj->driver_private;
rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
 
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
if (atomic)
fb_location = radeon_bo_gpu_offset(rbo);
else {
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
return -EINVAL;
}
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
 
switch (crtc->fb->bits_per_pixel) {
switch (target_fb->bits_per_pixel) {
case 8:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
913,6 → 1269,9
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
#ifdef __BIG_ENDIAN
fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
break;
case 24:
case 32:
919,18 → 1278,28
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
#ifdef __BIG_ENDIAN
fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
#endif
break;
default:
DRM_ERROR("Unsupported screen depth %d\n",
crtc->fb->bits_per_pixel);
target_fb->bits_per_pixel);
return -EINVAL;
}
 
if (rdev->family >= CHIP_R600) {
if (tiling_flags & RADEON_TILING_MACRO)
fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
} else {
if (tiling_flags & RADEON_TILING_MACRO)
fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
 
if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= AVIVO_D1GRPH_TILED;
}
 
if (radeon_crtc->crtc_id == 0)
WREG32(AVIVO_D1VGA_CONTROL, 0);
939,11 → 1308,11
 
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id) {
WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
} else {
WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
}
}
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
951,15 → 1320,17
WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
radeon_crtc->crtc_offset, (u32) fb_location);
WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
if (rdev->family >= CHIP_R600)
WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
969,18 → 1340,23
y &= ~1;
WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
(x << 16) | y);
viewport_w = crtc->mode.hdisplay;
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
(viewport_w << 16) | viewport_h);
 
if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
AVIVO_D1MODE_INTERLEAVE_EN);
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
/* pageflip setup */
/* make sure the flip happens during vertical blank rather than horizontal blank */
tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
rbo = radeon_fb->obj->driver_private;
/* set pageflip to happen anywhere in vblank interval */
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
1001,13 → 1377,28
struct radeon_device *rdev = dev->dev_private;
 
if (ASIC_IS_DCE4(rdev))
return evergreen_crtc_set_base(crtc, x, y, old_fb);
return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_set_base(crtc, x, y, old_fb);
return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
else
return radeon_crtc_set_base(crtc, x, y, old_fb);
return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
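
/* Atomic variant of the set_base path: as noted in the do_set_base()
 * helpers above, the framebuffer is assumed to be already pinned, idle and
 * fenced, so only the surface base pointers are reprogrammed.
 */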
 
int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
 
if (ASIC_IS_DCE4(rdev))
return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
else
return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
}
 
/* properly set additional regs when using atombios */
static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
{
1042,11 → 1433,19
uint32_t pll_in_use = 0;
 
if (ASIC_IS_DCE4(rdev)) {
/* if crtc is driving DP and we have an ext clock, use that */
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:
* DCE4: PPLL or ext clock
* DCE5: DCPLL or ext clock
*
* Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
* PPLL/DCPLL programming and only program the DP DTO for the
* crtc virtual pixel clock.
*/
if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
if (rdev->clock.dp_extclk)
if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
}
}
1080,24 → 1479,42
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
bool is_tvcv = false;
 
/* TODO color tiling */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
/* find tv std */
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->active_device &
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
is_tvcv = true;
}
}
 
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
 
atombios_set_ss(crtc, 0);
/* always set DCPLL */
if (ASIC_IS_DCE4(rdev))
atombios_crtc_set_dcpll(crtc);
if (ASIC_IS_DCE4(rdev)) {
struct radeon_atom_ss ss;
bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_DCPLL,
rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
/* XXX: DCE5, make sure voltage, dispclk is high enough */
atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
}
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_set_ss(crtc, 1);
 
if (ASIC_IS_DCE4(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
else if (ASIC_IS_AVIVO(rdev))
else if (ASIC_IS_AVIVO(rdev)) {
if (is_tvcv)
atombios_crtc_set_timing(crtc, adjusted_mode);
else {
else
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
} else {
atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
1113,6 → 1530,12
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);
 
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
1120,6 → 1543,11
 
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
 
atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
1130,14 → 1558,36
atombios_lock_crtc(crtc, ATOM_DISABLE);
}
 
static void atombios_crtc_disable(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_atom_ss ss;
 
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
case ATOM_PPLL2:
/* disable the ppll */
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
default:
break;
}
radeon_crtc->pll_id = -1;
}
 
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.dpms = atombios_crtc_dpms,
.mode_fixup = atombios_crtc_mode_fixup,
.mode_set = atombios_crtc_mode_set,
.mode_set_base = atombios_crtc_set_base,
.mode_set_base_atomic = atombios_crtc_set_base_atomic,
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
.load_lut = radeon_crtc_load_lut,
.disable = atombios_crtc_disable,
};
 
void radeon_atombios_init_crtc(struct drm_device *dev,
/drivers/video/drm/radeon/atombios_dp.c
43,158 → 43,242
"0dB", "3.5dB", "6dB", "9.5dB"
};
 
static const int dp_clocks[] = {
54000, /* 1 lane, 1.62 GHz */
90000, /* 1 lane, 2.70 GHz */
108000, /* 2 lanes, 1.62 GHz */
180000, /* 2 lanes, 2.70 GHz */
216000, /* 4 lanes, 1.62 GHz */
360000, /* 4 lanes, 2.70 GHz */
/***** radeon AUX functions *****/
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
};
 
static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int);
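/* Run one AUX channel transaction through the AtomBIOS
 * ProcessAuxChannelTransaction table: copy the request into the BIOS
 * scratch area, execute the table, and decode the reply status. Returns
 * the number of bytes received, or a negative errno on timeout/busy/error;
 * the raw AUX ack byte is handed back through *ack for the caller.
 */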
static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
u8 *send, int send_bytes,
u8 *recv, int recv_size,
u8 delay, u8 *ack)
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int recv_bytes;
 
/* common helper functions */
static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
memset(&args, 0, sizeof(args));
 
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
memcpy(base, send, send_bytes);
 
args.v1.lpAuxRequest = 0;
args.v1.lpDataOut = 16;
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
*ack = args.v1.ucReplyStatus;
 
/* timeout */
if (args.v1.ucReplyStatus == 1) {
DRM_DEBUG_KMS("dp_aux_ch timeout\n");
return -ETIMEDOUT;
}
 
/* flags not zero */
if (args.v1.ucReplyStatus == 2) {
DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
return -EBUSY;
}
 
/* error */
if (args.v1.ucReplyStatus == 3) {
DRM_DEBUG_KMS("dp_aux_ch error\n");
return -EIO;
}
 
recv_bytes = args.v1.ucDataOutLen;
if (recv_bytes > recv_size)
recv_bytes = recv_size;
 
if (recv && recv_size)
memcpy(recv, base + 16, recv_bytes);
 
return recv_bytes;
}
 
static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
u16 address, u8 *send, u8 send_bytes, u8 delay)
{
int i;
u8 max_link_bw;
u8 max_lane_count;
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
int ret;
u8 msg[20];
int msg_bytes = send_bytes + 4;
u8 ack;
 
if (!dpcd)
return 0;
if (send_bytes > 16)
return -1;
 
max_link_bw = dpcd[DP_MAX_LINK_RATE];
max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
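/* Header layout as this driver packs it for radeon_process_aux_ch():
 * msg[0..1] carry the low 16 bits of the DPCD address, the AUX command
 * nibble sits in the high half of msg[2], and msg[3] combines the total
 * request length (high nibble) with the AUX "payload length - 1" field
 * (low nibble).
 */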
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_WRITE << 4;
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
 
switch (max_link_bw) {
case DP_LINK_BW_1_62:
default:
for (i = 0; i < num_dp_clocks; i++) {
if (i % 2)
continue;
switch (max_lane_count) {
case 1:
if (i > 1)
return 0;
while (1) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
break;
case 2:
if (i > 3)
return 0;
break;
case 4:
default:
break;
}
if (dp_clocks[i] > mode_clock) {
if (i < 2)
return 1;
else if (i < 4)
return 2;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else
return 4;
return -EIO;
}
 
return send_bytes;
}
break;
case DP_LINK_BW_2_7:
for (i = 0; i < num_dp_clocks; i++) {
switch (max_lane_count) {
case 1:
if (i > 1)
return 0;
break;
case 2:
if (i > 3)
return 0;
break;
case 4:
default:
break;
}
if (dp_clocks[i] > mode_clock) {
if (i < 2)
return 1;
else if (i < 4)
return 2;
 
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
u16 address, u8 *recv, int recv_bytes, u8 delay)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[4];
int msg_bytes = 4;
u8 ack;
int ret;
 
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
while (1) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
if (ret == 0)
return -EPROTO;
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else
return 4;
return -EIO;
}
}
break;
 
static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
u16 reg, u8 val)
{
radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
}
 
return 0;
static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
u16 reg)
{
u8 val = 0;
 
radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
 
return val;
}
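
/* A minimal usage sketch (hypothetical caller) of the helpers above:
 * reading the sink's DPCD revision, as radeon_dp_getdpcd() does below.
 *
 *	u8 rev = radeon_read_dpcd_reg(radeon_connector, DP_DPCD_REV);
 *	if (rev >= 0x11)
 *		;	// DPCD 1.1+: enhanced framing may be enabled
 */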
 
static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte)
{
int i;
u8 max_link_bw;
u8 max_lane_count;
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
u16 address = algo_data->address;
u8 msg[5];
u8 reply[2];
unsigned retry;
int msg_bytes;
int reply_bytes = 1;
int ret;
u8 ack;
 
if (!dpcd)
return 0;
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[2] = AUX_I2C_READ << 4;
else
msg[2] = AUX_I2C_WRITE << 4;
 
max_link_bw = dpcd[DP_MAX_LINK_RATE];
max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
if (!(mode & MODE_I2C_STOP))
msg[2] |= AUX_I2C_MOT << 4;
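/* The MOT (middle-of-transaction) bit keeps the emulated I2C transfer open
 * across successive AUX requests; it is left clear only on the request that
 * carries the I2C STOP condition.
 */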
 
switch (max_link_bw) {
case DP_LINK_BW_1_62:
default:
for (i = 0; i < num_dp_clocks; i++) {
if (i % 2)
continue;
switch (max_lane_count) {
case 1:
if (i > 1)
return 0;
msg[0] = address;
msg[1] = address >> 8;
 
switch (mode) {
case MODE_I2C_WRITE:
msg_bytes = 5;
msg[3] = msg_bytes << 4;
msg[4] = write_byte;
break;
case 2:
if (i > 3)
return 0;
case MODE_I2C_READ:
msg_bytes = 4;
msg[3] = msg_bytes << 4;
break;
case 4:
default:
msg_bytes = 4;
msg[3] = 3 << 4;
break;
}
if (dp_clocks[i] > mode_clock)
return 162000;
 
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
 
switch (ack & AUX_NATIVE_REPLY_MASK) {
case AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
case DP_LINK_BW_2_7:
for (i = 0; i < num_dp_clocks; i++) {
switch (max_lane_count) {
case 1:
if (i > 1)
return 0;
case AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
DRM_DEBUG_KMS("aux_ch native defer\n");
udelay(400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
return -EREMOTEIO;
}
 
switch (ack & AUX_I2C_REPLY_MASK) {
case AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ)
*read_byte = reply[0];
return ret;
case AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(400);
break;
case 2:
if (i > 3)
return 0;
break;
case 4:
default:
break;
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
return -EREMOTEIO;
}
if (dp_clocks[i] > mode_clock)
return (i % 2) ? 270000 : 162000;
}
}
 
return 0;
DRM_ERROR("aux i2c too many retries, giving up\n");
return -EREMOTEIO;
}
 
int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
{
int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
int bw = dp_lanes_for_mode_clock(dpcd, mode_clock);
/***** general DP utility functions *****/
 
if ((lanes == 0) || (bw == 0))
return MODE_CLOCK_HIGH;
 
return MODE_OK;
}
 
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
242,7 → 326,7
return true;
}
 
static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
 
{
255,7 → 339,7
return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
 
static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
267,22 → 351,8
return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
 
/* XXX fix me -- chip specific */
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
static u8 dp_pre_emphasis_max(u8 voltage_swing)
{
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
}
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
 
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count,
296,7 → 366,7
u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
 
DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
lane,
voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
308,12 → 378,12
}
 
if (v >= DP_VOLTAGE_MAX)
v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
v |= DP_TRAIN_MAX_SWING_REACHED;
 
if (p >= dp_pre_emphasis_max(v))
p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
if (p >= DP_PRE_EMPHASIS_MAX)
p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
 
321,110 → 391,109
train_set[lane] = v | p;
}
 
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
};
/* convert bits per color, as reported by the EDID, to bits per pixel */
static int convert_bpc_to_bpp(int bpc)
{
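/* bpc == 0 means no color depth was reported, so fall back to the
 * common 24 bpp (8 bits per color x 3 components).
 */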
if (bpc == 0)
return 24;
else
return bpc * 3;
}
 
/* radeon aux chan functions */
bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
int num_bytes, u8 *read_byte,
u8 read_buf_len, u8 delay)
/* get the max pix clock supported by the link rate and lane num */
static int dp_get_max_dp_pix_clock(int link_rate,
int lane_num,
int bpp)
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int retry_count = 0;
 
memset(&args, 0, sizeof(args));
 
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
retry:
memcpy(base, req_bytes, num_bytes);
 
args.v1.lpAuxRequest = 0;
args.v1.lpDataOut = 16;
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd_id;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
goto retry;
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
return false;
return (link_rate * lane_num * 8) / bpp;
}
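
/* Worked example, assuming link_rate and pix_clock are both in kHz as
 * elsewhere in this file: a 1920x1080@60 mode (~148500 kHz) at 24 bpp on a
 * single 2.70 GHz lane gets 270000 * 1 * 8 / 24 = 90000 kHz, two lanes give
 * 180000 kHz (enough), while 1.62 GHz would need all four lanes
 * (162000 * 4 * 8 / 24 = 216000 kHz).
 */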
 
if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
if (read_buf_len < args.v1.ucDataOutLen) {
DRM_ERROR("Buffer to small for return answer %d %d\n",
read_buf_len, args.v1.ucDataOutLen);
return false;
}
static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
{
int len = min(read_buf_len, args.v1.ucDataOutLen);
memcpy(read_byte, base + 16, len);
switch (dpcd[DP_MAX_LINK_RATE]) {
case DP_LINK_BW_1_62:
default:
return 162000;
case DP_LINK_BW_2_7:
return 270000;
case DP_LINK_BW_5_4:
return 540000;
}
}
return true;
 
static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
{
return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}
 
bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
uint8_t send_bytes, uint8_t *send)
static u8 dp_get_dp_link_rate_coded(int link_rate)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[20];
u8 msg_len, dp_msg_len;
bool ret;
switch (link_rate) {
case 162000:
default:
return DP_LINK_BW_1_62;
case 270000:
return DP_LINK_BW_2_7;
case 540000:
return DP_LINK_BW_5_4;
}
}
 
dp_msg_len = 4;
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_WRITE << 4;
dp_msg_len += send_bytes;
msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
/***** radeon specific DP functions *****/
 
if (send_bytes > 16)
return false;
/* First, work out the minimum lane count needed for the pixel clock at the
 * low link rate (prefer the low rate); then check the maximum lane count
 * supported by the DP panel, and if that is smaller than the low-rate lane
 * count, use the panel's maximum instead.
 */
static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
int max_link_rate = dp_get_max_link_rate(dpcd);
int max_lane_num = dp_get_max_lane_number(dpcd);
int lane_num;
int max_dp_pix_clock;
 
memcpy(&msg[4], send, send_bytes);
msg_len = 4 + send_bytes;
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
return ret;
for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
if (pix_clock <= max_dp_pix_clock)
break;
}
 
bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
uint8_t delay, uint8_t expected_bytes,
uint8_t *read_p)
return lane_num;
}
 
static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[20];
u8 msg_len, dp_msg_len;
bool ret = false;
msg_len = 4;
dp_msg_len = 4;
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (dp_msg_len) << 4;
msg[3] |= expected_bytes - 1;
int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
int lane_num, max_pix_clock;
 
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
return ret;
if (radeon_connector_encoder_is_dp_bridge(connector))
return 270000;
 
lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 162000;
max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 270000;
if (radeon_connector_is_dp12_capable(connector)) {
max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 540000;
}
 
/* radeon dp functions */
static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
uint8_t ucconfig, uint8_t lane_num)
return dp_get_max_link_rate(dpcd);
}
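
/* Continuing the example above: with a sink reporting 2.70 GHz / 4 lanes,
 * radeon_dp_get_dp_lane_number() settles on 2 lanes for a 148500 kHz mode
 * (180000 >= 148500), and this function then returns 270000, since two
 * lanes at 162000 only reach 108000 kHz.
 */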
 
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
int action, int dp_clock,
u8 ucconfig, u8 lane_num)
{
DP_ENCODER_SERVICE_PARAMETERS args;
int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
454,18 → 523,15
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[25];
int ret;
int ret, i;
 
ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
if (ret) {
ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, 8);
{
int i;
DRM_DEBUG("DPCD: ");
DRM_DEBUG_KMS("DPCD: ");
for (i = 0; i < 8; i++)
DRM_DEBUG("%02x ", msg[i]);
DRM_DEBUG("\n");
}
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
return true;
}
dig_connector->dpcd[0] = 0;
472,338 → 538,384
return false;
}
 
static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
 
if (!ASIC_IS_DCE4(rdev))
return;
 
if (radeon_connector_encoder_is_dp_bridge(connector))
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
 
atombios_dig_encoder_setup(encoder,
ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
panel_mode);
}
 
void radeon_dp_set_link_config(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
 
if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
(connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
 
radeon_connector = to_radeon_connector(connector);
if (!radeon_connector->con_priv)
return;
dig_connector = radeon_connector->con_priv;
 
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
dig_connector->dp_clock =
dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
dig_connector->dp_lane_count =
dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
}
}
 
int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;
 
return dp_mode_valid(dig_connector->dpcd, mode->clock);
if (!radeon_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
 
dp_clock =
radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
 
if ((dp_clock == 540000) &&
(!radeon_connector_is_dp12_capable(connector)))
return MODE_CLOCK_HIGH;
 
return MODE_OK;
}
 
static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
u8 link_status[DP_LINK_STATUS_SIZE])
{
int ret;
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
DP_LINK_STATUS_SIZE, link_status);
if (!ret) {
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE, 100);
if (ret <= 0) {
DRM_ERROR("displayport link status failed\n");
return false;
}
 
DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
return true;
}
 
bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
struct radeon_dp_link_train_info {
struct radeon_device *rdev;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
int enc_id;
int dp_clock;
int dp_lane_count;
int rd_interval;
bool tp3_supported;
u8 dpcd[8];
u8 train_set[4];
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
};
 
if (!atom_dp_get_link_status(radeon_connector, link_status))
return false;
if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
return false;
return true;
static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
{
/* set the initial vs/emph on the source */
atombios_dig_transmitter_setup(dp_info->encoder,
ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
0, dp_info->train_set[0]); /* sets all lanes at once */
 
/* set the vs/emph on the sink */
radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
dp_info->train_set, dp_info->dp_lane_count, 0);
}
 
static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
int rtp = 0;
 
if (dig_connector->dpcd[0] >= 0x11) {
radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
&power_state);
/* set training pattern on the source */
if (ASIC_IS_DCE4(dp_info->rdev)) {
switch (tp) {
case DP_TRAINING_PATTERN_1:
rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
break;
case DP_TRAINING_PATTERN_2:
rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
break;
case DP_TRAINING_PATTERN_3:
rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
break;
}
atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
} else {
switch (tp) {
case DP_TRAINING_PATTERN_1:
rtp = 0;
break;
case DP_TRAINING_PATTERN_2:
rtp = 1;
break;
}
 
static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
{
radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
&downspread);
radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dp_info->dp_clock, dp_info->enc_id, rtp);
}
 
static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
link_configuration);
/* enable training pattern on the sink */
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
}
 
static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
struct drm_encoder *encoder,
u8 train_set[4])
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
int i;
u8 tmp;
 
for (i = 0; i < dig_connector->dp_lane_count; i++)
atombios_dig_transmitter_setup(encoder,
ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
i, train_set[i]);
/* power up the sink */
if (dp_info->dpcd[0] >= 0x11)
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_SET_POWER, DP_SET_POWER_D0);
 
radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
dig_connector->dp_lane_count, train_set);
}
/* possibly enable downspread on the sink */
if (dp_info->dpcd[3] & 0x1)
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
else
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, 0);
 
static void dp_set_training(struct radeon_connector *radeon_connector,
u8 training)
{
radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
1, &training);
}
radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
 
void dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
int enc_id = 0;
bool clock_recovery, channel_eq;
u8 link_status[DP_LINK_STATUS_SIZE];
u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
u8 tries, voltage;
u8 train_set[4];
int i;
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
if (dp_info->dpcd[0] >= 0x11)
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
(connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
/* set the link rate on the sink */
tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
 
if (!radeon_encoder->enc_priv)
return;
dig = radeon_encoder->enc_priv;
/* start training on the source */
if (ASIC_IS_DCE4(dp_info->rdev))
atombios_dig_encoder_setup(dp_info->encoder,
ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
else
radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
dp_info->dp_clock, dp_info->enc_id, 0);
 
radeon_connector = to_radeon_connector(connector);
if (!radeon_connector->con_priv)
return;
dig_connector = radeon_connector->con_priv;
/* disable the training pattern on the sink */
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
 
if (dig->dig_encoder)
enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
else
enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
if (dig_connector->linkb)
enc_id |= ATOM_DP_CONFIG_LINK_B;
else
enc_id |= ATOM_DP_CONFIG_LINK_A;
return 0;
}
 
memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
if (dig_connector->dp_clock == 270000)
link_configuration[0] = DP_LINK_BW_2_7;
static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
{
udelay(400);
 
/* disable the training pattern on the sink */
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
 
/* disable the training pattern on the source */
if (ASIC_IS_DCE4(dp_info->rdev))
atombios_dig_encoder_setup(dp_info->encoder,
ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
else
link_configuration[0] = DP_LINK_BW_1_62;
link_configuration[1] = dig_connector->dp_lane_count;
if (dig_connector->dpcd[0] >= 0x11)
link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dp_info->dp_clock, dp_info->enc_id, 0);
 
/* power up the sink */
dp_set_power(radeon_connector, DP_SET_POWER_D0);
/* disable the training pattern on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
/* set link bw and lanes on the sink */
dp_set_link_bw_lanes(radeon_connector, link_configuration);
/* disable downspread on the sink */
dp_set_downspread(radeon_connector, 0);
if (ASIC_IS_DCE4(rdev)) {
/* start training on the source */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
/* set training pattern 1 on the source */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
} else {
/* start training on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
dig_connector->dp_clock, enc_id, 0);
/* set training pattern 1 on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dig_connector->dp_clock, enc_id, 0);
return 0;
}
 
/* set initial vs/emph */
memset(train_set, 0, 4);
static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
{
bool clock_recovery;
u8 voltage;
int i;
 
radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
memset(dp_info->train_set, 0, 4);
radeon_dp_update_vs_emph(dp_info);
 
udelay(400);
/* set training pattern 1 on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
 
dp_update_dpvs_emph(radeon_connector, encoder, train_set);
 
/* clock recovery loop */
clock_recovery = false;
tries = 0;
dp_info->tries = 0;
voltage = 0xff;
for (;;) {
while (1) {
if (dp_info->rd_interval == 0)
udelay(100);
if (!atom_dp_get_link_status(radeon_connector, link_status))
else
mdelay(dp_info->rd_interval * 4);
 
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
break;
 
if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
break;
}
 
for (i = 0; i < dig_connector->dp_lane_count; i++) {
if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
for (i = 0; i < dp_info->dp_lane_count; i++) {
if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
}
if (i == dig_connector->dp_lane_count) {
if (i == dp_info->dp_lane_count) {
DRM_ERROR("clock recovery reached max voltage\n");
break;
}
 
if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5) {
if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++dp_info->tries;
if (dp_info->tries == 5) {
DRM_ERROR("clock recovery tried 5 times\n");
break;
}
} else
tries = 0;
dp_info->tries = 0;
 
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
/* Compute new train_set as requested by sink */
dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
dp_update_dpvs_emph(radeon_connector, encoder, train_set);
dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
 
radeon_dp_update_vs_emph(dp_info);
}
if (!clock_recovery)
if (!clock_recovery) {
DRM_ERROR("clock recovery failed\n");
else
DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
return -1;
} else {
DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT);
return 0;
}
}
 
static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
{
bool channel_eq;
 
/* set training pattern 2 on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
/* set training pattern 2 on the source */
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
if (dp_info->tp3_supported)
radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
else
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dig_connector->dp_clock, enc_id, 1);
radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
 
/* channel equalization loop */
tries = 0;
dp_info->tries = 0;
channel_eq = false;
for (;;) {
while (1) {
if (dp_info->rd_interval == 0)
udelay(400);
if (!atom_dp_get_link_status(radeon_connector, link_status))
else
mdelay(dp_info->rd_interval * 4);
 
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
break;
 
if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
break;
}
 
/* Try 5 times */
if (tries > 5) {
if (dp_info->tries > 5) {
DRM_ERROR("channel eq failed: 5 tries\n");
break;
}
 
/* Compute new train_set as requested by sink */
dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
dp_update_dpvs_emph(radeon_connector, encoder, train_set);
dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
 
tries++;
radeon_dp_update_vs_emph(dp_info);
dp_info->tries++;
}
 
if (!channel_eq)
if (!channel_eq) {
DRM_ERROR("channel eq failed\n");
else
DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
return -1;
} else {
DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
 
/* disable the training pattern on the sink */
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
else
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
 
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
return 0;
}
}
 
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
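/* Top-level DP link training: gather the encoder/sink parameters into
 * dp_info, then run the standard sequence (init: sink power, lane count,
 * link rate; clock recovery; channel equalization), and finally idle the
 * training pattern via radeon_dp_link_train_finish().
 */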
void radeon_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
int ret = 0;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
int msg_len, dp_msg_len;
int reply_bytes;
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
struct radeon_dp_link_train_info dp_info;
u8 tmp;
 
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[2] = AUX_I2C_READ << 4;
else
msg[2] = AUX_I2C_WRITE << 4;
if (!radeon_encoder->enc_priv)
return;
dig = radeon_encoder->enc_priv;
 
if (!(mode & MODE_I2C_STOP))
msg[2] |= AUX_I2C_MOT << 4;
radeon_connector = to_radeon_connector(connector);
if (!radeon_connector->con_priv)
return;
dig_connector = radeon_connector->con_priv;
 
msg[0] = address;
msg[1] = address >> 8;
if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
(dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
return;
 
reply_bytes = 1;
dp_info.enc_id = 0;
if (dig->dig_encoder)
dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
else
dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
if (dig->linkb)
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
else
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
 
msg_len = 4;
dp_msg_len = 3;
switch (mode) {
case MODE_I2C_WRITE:
msg[4] = write_byte;
msg_len++;
dp_msg_len += 2;
break;
case MODE_I2C_READ:
dp_msg_len += 1;
break;
default:
break;
}
dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
dp_info.tp3_supported = true;
else
dp_info.tp3_supported = false;
 
msg[3] = (dp_msg_len) << 4;
ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
dp_info.rdev = rdev;
dp_info.encoder = encoder;
dp_info.connector = connector;
dp_info.radeon_connector = radeon_connector;
dp_info.dp_lane_count = dig_connector->dp_lane_count;
dp_info.dp_clock = dig_connector->dp_clock;
 
if (ret) {
if (read_byte)
*read_byte = reply[0];
return reply_bytes;
if (radeon_dp_link_train_init(&dp_info))
goto done;
if (radeon_dp_link_train_cr(&dp_info))
goto done;
if (radeon_dp_link_train_ce(&dp_info))
goto done;
done:
if (radeon_dp_link_train_finish(&dp_info))
return;
}
return -EREMOTEIO;
}
 
/drivers/video/drm/radeon/evergreen.c
23,14 → 23,19
*/
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
 
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
 
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
 
37,7 → 42,36
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
/* XXX */
 
switch (hpd) {
case RADEON_HPD_1:
if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_2:
if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_3:
if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_4:
if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_5:
if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_6:
if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
default:
break;
}
 
return connected;
}
 
44,26 → 78,153
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
/* XXX */
u32 tmp;
bool connected = evergreen_hpd_sense(rdev, hpd);
 
switch (hpd) {
case RADEON_HPD_1:
tmp = RREG32(DC_HPD1_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
break;
case RADEON_HPD_2:
tmp = RREG32(DC_HPD2_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD2_INT_CONTROL, tmp);
break;
case RADEON_HPD_3:
tmp = RREG32(DC_HPD3_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD3_INT_CONTROL, tmp);
break;
case RADEON_HPD_4:
tmp = RREG32(DC_HPD4_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD4_INT_CONTROL, tmp);
break;
case RADEON_HPD_5:
tmp = RREG32(DC_HPD5_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD5_INT_CONTROL, tmp);
break;
case RADEON_HPD_6:
tmp = RREG32(DC_HPD6_INT_CONTROL);
if (connected)
tmp &= ~DC_HPDx_INT_POLARITY;
else
tmp |= DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
break;
default:
break;
}
}
 
#if 0
void evergreen_hpd_init(struct radeon_device *rdev)
{
/* XXX */
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
rdev->irq.hpd[3] = true;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
rdev->irq.hpd[5] = true;
break;
default:
break;
}
}
if (rdev->irq.installed)
evergreen_irq_set(rdev);
}
 
void evergreen_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
 
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
/* XXX */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
rdev->irq.hpd[3] = false;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
rdev->irq.hpd[5] = false;
break;
default:
break;
}
}
}
 
void evergreen_hpd_fini(struct radeon_device *rdev)
#endif
 
 
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
/* XXX */
}
 
static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
81,10 → 242,33
/*
* GART
*/
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
 
WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
for (i = 0; i < rdev->usec_timeout; i++) {
/* read the TLB-flush request/response status */
tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
if (tmp == 2) {
printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
return;
}
if (tmp) {
return;
}
udelay(1);
}
}
 
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
int r;
 
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
105,9 → 289,15
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
if (rdev->flags & RADEON_IS_IGP) {
WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
} else {
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
119,10 → 309,9
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
for (i = 1; i < 7; i++)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
WREG32(VM_CONTEXT1_CNTL, 0);
 
r600_pcie_gart_tlb_flush(rdev);
evergreen_pcie_gart_tlb_flush(rdev);
rdev->gart.ready = true;
return 0;
}
130,11 → 319,11
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int i, r;
int r;
 
/* Disable all tables */
for (i = 0; i < 7; i++)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
 
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
171,7 → 360,6
void evergreen_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
int i;
 
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
191,11 → 379,11
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
for (i = 0; i < 7; i++)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
}
 
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
207,31 → 395,39
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
if (!(rdev->flags & RADEON_IS_IGP)) {
save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
 
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
if (!(rdev->flags & RADEON_IS_IGP)) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (!(rdev->flags & RADEON_IS_IGP)) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (!(rdev->flags & RADEON_IS_IGP)) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
241,7 → 437,7
WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
 
static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
261,6 → 457,7
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
 
if (!(rdev->flags & RADEON_IS_IGP)) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
296,6 → 493,7
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
}
 
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
311,26 → 509,32
WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
if (!(rdev->flags & RADEON_IS_IGP)) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
if (!(rdev->flags & RADEON_IS_IGP)) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (!(rdev->flags & RADEON_IS_IGP)) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
 
static void evergreen_mc_program(struct radeon_device *rdev)
void evergreen_mc_program(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 tmp;
374,12 → 578,18
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
if (rdev->flags & RADEON_IS_IGP) {
tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
WREG32(MC_FUS_VM_FB_OFFSET, tmp);
}
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
402,29 → 612,322
/*
* CP.
*/
static void evergreen_cp_stop(struct radeon_device *rdev)
 
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
/* XXX */
const __be32 *fw_data;
int i;
 
if (!rdev->me_fw || !rdev->pfp_fw)
return -EINVAL;
 
r700_cp_stop(rdev);
WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
BUF_SWAP_32BIT |
#endif
RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
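
/* Microcode upload: zero the write-address register, stream the big-endian
 * ucode words through the matching data port, then reset the address
 * pointers when done.
 */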
 
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
 
fw_data = (const __be32 *)rdev->me_fw->data;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
 
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_ME_RAM_WADDR, 0);
WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
 
static int evergreen_cp_start(struct radeon_device *rdev)
{
int r, i;
uint32_t cp_me;
 
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
r = radeon_ring_lock(rdev, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
 
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
 
r = radeon_ring_lock(rdev, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
 
/* setup clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
for (i = 0; i < evergreen_default_size; i++)
radeon_ring_write(rdev, evergreen_default_state[i]);
 
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
/* set clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
 
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(rdev, 0xc0026f00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
 
/* Clear consts */
radeon_ring_write(rdev, 0xc0036f00);
radeon_ring_write(rdev, 0x00000bc4);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
 
radeon_ring_write(rdev, 0xc0026900);
radeon_ring_write(rdev, 0x00000316);
radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(rdev, 0x00000010); /* */
 
radeon_ring_unlock_commit(rdev);
 
return 0;
}
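
/* Bring the CP back up: soft-reset the engine block, program the ring
 * buffer size, read/write pointers and writeback addresses, point the ring
 * at its GPU address, then start the CP and confirm it with a ring test.
 */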
 
int evergreen_cp_resume(struct radeon_device *rdev)
{
/* XXX */
u32 tmp;
u32 rb_bufsz;
int r;
 
/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
SOFT_RESET_PA |
SOFT_RESET_SH |
SOFT_RESET_VGT |
SOFT_RESET_SX));
RREG32(GRBM_SOFT_RESET);
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
 
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_SEM_WAIT_TIMER, 0x4);
 
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
#ifdef __BIG_ENDIAN
RB_RPTR_SWAP(2) |
#endif
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
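/* Note (editorial): the rptr writeback target is a 40-bit GPU address
 * split across two registers; the & 0xFFFFFFFC forces dword alignment
 * on the low half and only bits 39:32 survive in the _HI register.
 */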
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
if (rdev->wb.enabled)
WREG32(SCRATCH_UMSK, 0xff);
else {
tmp |= RB_NO_UPDATE;
WREG32(SCRATCH_UMSK, 0);
}
 
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
 
WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
rdev->cp.rptr = RREG32(CP_RB_RPTR);
rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
evergreen_cp_start(rdev);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
if (r) {
rdev->cp.ready = false;
return r;
}
return 0;
}
 
 
/*
* Core functions
*/
static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask = 0;
u32 enabled_backends_count = 0;
u32 cur_pipe;
u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
u32 cur_backend = 0;
u32 i;
bool force_no_swizzle;
 
if (num_tile_pipes > EVERGREEN_MAX_PIPES)
num_tile_pipes = EVERGREEN_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > EVERGREEN_MAX_BACKENDS)
num_backends = EVERGREEN_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
 
for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
 
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
 
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
 
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
force_no_swizzle = false;
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_JUNIPER:
case CHIP_BARTS:
default:
force_no_swizzle = true;
break;
}
if (force_no_swizzle) {
bool last_backend_enabled = false;
 
force_no_swizzle = false;
for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
if (((enabled_backends_mask >> i) & 1) == 1) {
if (last_backend_enabled)
force_no_swizzle = true;
last_backend_enabled = true;
} else
last_backend_enabled = false;
}
}
 
switch (num_tile_pipes) {
case 1:
case 3:
case 5:
case 7:
DRM_ERROR("odd number of pipes!\n");
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
swizzle_pipe[3] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
}
break;
}
 
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
 
backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
 
cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
}
 
return backend_map;
}
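/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * map built above packs one 4-bit backend id per pipe, so a literal such
 * as 0x76543210 reads as "pipe 0 -> backend 0, ..., pipe 7 -> backend 7".
 * A consumer could decode it as:
 */
static inline u32 evergreen_backend_for_pipe(u32 backend_map, u32 pipe)
{
return (backend_map >> (pipe * 4)) & 0xf;
}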
#endif
431,12 → 934,661
 
static void evergreen_gpu_init(struct radeon_device *rdev)
{
/* XXX */
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 gb_backend_map;
u32 grbm_gfx_index;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 sq_config;
u32 sq_lds_resource_mgmt;
u32 sq_gpr_resource_mgmt_1;
u32 sq_gpr_resource_mgmt_2;
u32 sq_gpr_resource_mgmt_3;
u32 sq_thread_resource_mgmt;
u32 sq_thread_resource_mgmt_2;
u32 sq_stack_resource_mgmt_1;
u32 sq_stack_resource_mgmt_2;
u32 sq_stack_resource_mgmt_3;
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
int i, j, num_shader_engines, ps_thread_count;
 
switch (rdev->family) {
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
rdev->config.evergreen.num_ses = 2;
rdev->config.evergreen.max_pipes = 4;
rdev->config.evergreen.max_tile_pipes = 8;
rdev->config.evergreen.max_simds = 10;
rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 248;
rdev->config.evergreen.max_gs_threads = 32;
rdev->config.evergreen.max_stack_entries = 512;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
rdev->config.evergreen.max_hw_contexts = 8;
rdev->config.evergreen.sq_num_cf_insts = 2;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
case CHIP_JUNIPER:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
rdev->config.evergreen.max_tile_pipes = 4;
rdev->config.evergreen.max_simds = 10;
rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 248;
rdev->config.evergreen.max_gs_threads = 32;
rdev->config.evergreen.max_stack_entries = 512;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
rdev->config.evergreen.max_hw_contexts = 8;
rdev->config.evergreen.sq_num_cf_insts = 2;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
case CHIP_REDWOOD:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
rdev->config.evergreen.max_tile_pipes = 4;
rdev->config.evergreen.max_simds = 5;
rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 248;
rdev->config.evergreen.max_gs_threads = 32;
rdev->config.evergreen.max_stack_entries = 256;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
rdev->config.evergreen.max_hw_contexts = 8;
rdev->config.evergreen.sq_num_cf_insts = 2;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
case CHIP_CEDAR:
default:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 2;
rdev->config.evergreen.max_tile_pipes = 2;
rdev->config.evergreen.max_simds = 2;
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 192;
rdev->config.evergreen.max_gs_threads = 16;
rdev->config.evergreen.max_stack_entries = 256;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 128;
rdev->config.evergreen.sx_max_export_pos_size = 32;
rdev->config.evergreen.sx_max_export_smx_size = 96;
rdev->config.evergreen.max_hw_contexts = 4;
rdev->config.evergreen.sq_num_cf_insts = 1;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
case CHIP_PALM:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 2;
rdev->config.evergreen.max_tile_pipes = 2;
rdev->config.evergreen.max_simds = 2;
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 192;
rdev->config.evergreen.max_gs_threads = 16;
rdev->config.evergreen.max_stack_entries = 256;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 128;
rdev->config.evergreen.sx_max_export_pos_size = 32;
rdev->config.evergreen.sx_max_export_smx_size = 96;
rdev->config.evergreen.max_hw_contexts = 4;
rdev->config.evergreen.sq_num_cf_insts = 1;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
rdev->config.evergreen.max_tile_pipes = 2;
if (rdev->pdev->device == 0x9648)
rdev->config.evergreen.max_simds = 3;
else if ((rdev->pdev->device == 0x9647) ||
(rdev->pdev->device == 0x964a))
rdev->config.evergreen.max_simds = 4;
else
rdev->config.evergreen.max_simds = 5;
rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 248;
rdev->config.evergreen.max_gs_threads = 32;
rdev->config.evergreen.max_stack_entries = 256;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
rdev->config.evergreen.max_hw_contexts = 8;
rdev->config.evergreen.sq_num_cf_insts = 2;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
rdev->config.evergreen.max_tile_pipes = 4;
rdev->config.evergreen.max_simds = 2;
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 248;
rdev->config.evergreen.max_gs_threads = 32;
rdev->config.evergreen.max_stack_entries = 512;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
rdev->config.evergreen.max_hw_contexts = 8;
rdev->config.evergreen.sq_num_cf_insts = 2;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
rdev->config.evergreen.max_pipes = 4;
rdev->config.evergreen.max_tile_pipes = 8;
rdev->config.evergreen.max_simds = 7;
rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 248;
rdev->config.evergreen.max_gs_threads = 32;
rdev->config.evergreen.max_stack_entries = 512;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
rdev->config.evergreen.max_hw_contexts = 8;
rdev->config.evergreen.sq_num_cf_insts = 2;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
case CHIP_TURKS:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
rdev->config.evergreen.max_tile_pipes = 4;
rdev->config.evergreen.max_simds = 6;
rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 248;
rdev->config.evergreen.max_gs_threads = 32;
rdev->config.evergreen.max_stack_entries = 256;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
rdev->config.evergreen.max_hw_contexts = 8;
rdev->config.evergreen.sq_num_cf_insts = 2;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
rdev->config.evergreen.max_tile_pipes = 2;
rdev->config.evergreen.max_simds = 2;
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
rdev->config.evergreen.max_gprs = 256;
rdev->config.evergreen.max_threads = 192;
rdev->config.evergreen.max_gs_threads = 16;
rdev->config.evergreen.max_stack_entries = 256;
rdev->config.evergreen.sx_num_of_sets = 4;
rdev->config.evergreen.sx_max_export_size = 128;
rdev->config.evergreen.sx_max_export_pos_size = 32;
rdev->config.evergreen.sx_max_export_smx_size = 96;
rdev->config.evergreen.max_hw_contexts = 4;
rdev->config.evergreen.sq_num_cf_insts = 1;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
}
 
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x18) {
WREG32((0x2c14 + j), 0x00000000);
WREG32((0x2c18 + j), 0x00000000);
WREG32((0x2c1c + j), 0x00000000);
WREG32((0x2c20 + j), 0x00000000);
WREG32((0x2c24 + j), 0x00000000);
}
 
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
 
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
& EVERGREEN_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
& EVERGREEN_MAX_SIMDS_MASK);
 
cc_rb_backend_disable =
BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
& EVERGREEN_MAX_BACKENDS_MASK);
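/* Worked example (illustrative): with max_pipes = 4 the expression
 * (EVERGREEN_MAX_PIPES_MASK << 4) & EVERGREEN_MAX_PIPES_MASK yields 0xF0,
 * i.e. the shift-then-mask idiom marks every unit beyond the first
 * max_pipes / max_simds / max_backends as inactive or disabled.
 */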
 
 
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
if (rdev->flags & RADEON_IS_IGP)
mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
else
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
switch (rdev->config.evergreen.max_tile_pipes) {
case 1:
default:
gb_addr_config |= NUM_PIPES(0);
break;
case 2:
gb_addr_config |= NUM_PIPES(1);
break;
case 4:
gb_addr_config |= NUM_PIPES(2);
break;
case 8:
gb_addr_config |= NUM_PIPES(3);
break;
}
 
gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
 
if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
gb_addr_config |= ROW_SIZE(2);
else
gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
 
if (rdev->ddev->pdev->device == 0x689e) {
u32 efuse_straps_4;
u32 efuse_straps_3;
u8 efuse_box_bit_131_124;
 
WREG32(RCU_IND_INDEX, 0x204);
efuse_straps_4 = RREG32(RCU_IND_DATA);
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
 
switch(efuse_box_bit_131_124) {
case 0x00:
gb_backend_map = 0x76543210;
break;
case 0x55:
gb_backend_map = 0x77553311;
break;
case 0x56:
gb_backend_map = 0x77553300;
break;
case 0x59:
gb_backend_map = 0x77552211;
break;
case 0x66:
gb_backend_map = 0x77443300;
break;
case 0x99:
gb_backend_map = 0x66552211;
break;
case 0x5a:
gb_backend_map = 0x77552200;
break;
case 0xaa:
gb_backend_map = 0x66442200;
break;
case 0x95:
gb_backend_map = 0x66553311;
break;
default:
DRM_ERROR("bad backend map, using default\n");
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
} else if (rdev->ddev->pdev->device == 0x68b9) {
u32 efuse_straps_3;
u8 efuse_box_bit_127_124;
 
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
 
switch(efuse_box_bit_127_124) {
case 0x0:
gb_backend_map = 0x00003210;
break;
case 0x5:
case 0x6:
case 0x9:
case 0xa:
gb_backend_map = 0x00003311;
break;
default:
DRM_ERROR("bad backend map, using default\n");
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
} else {
switch (rdev->family) {
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_BARTS:
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
gb_backend_map = 0x00006420;
break;
default:
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
}
}
 
/* set up the tiling info dword. gb_addr_config is not adequate since it
 * does not carry bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
* bits 7:4 num_banks
* bits 11:8 group_size
* bits 15:12 row_size
*/
rdev->config.evergreen.tile_config = 0;
switch (rdev->config.evergreen.max_tile_pipes) {
case 1:
default:
rdev->config.evergreen.tile_config |= (0 << 0);
break;
case 2:
rdev->config.evergreen.tile_config |= (1 << 0);
break;
case 4:
rdev->config.evergreen.tile_config |= (2 << 0);
break;
case 8:
rdev->config.evergreen.tile_config |= (3 << 0);
break;
}
/* the number of banks is 8 on all Fusion ASICs */
if (rdev->flags & RADEON_IS_IGP)
rdev->config.evergreen.tile_config |= 8 << 4;
else
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
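/* Illustrative decode (editorial): the fields hold encoded values, not
 * raw counts, so a consumer would unpack the dword roughly as:
 *
 * pipes = 1 << (tile_config & 0xf); // code 0..3 -> 1/2/4/8
 * bank_code = (tile_config >> 4) & 0xf; // 8 on IGPs, else NOOFBANK
 * group_code = (tile_config >> 8) & 0xf; // from BURSTLENGTH
 * row_code = (tile_config >> 12) & 0xf; // from GB_ADDR_CONFIG ROW_SIZE
 */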
 
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
evergreen_program_channel_remap(rdev);
 
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
 
for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
u32 rb = cc_rb_backend_disable | (0xf0 << 16);
u32 sp = cc_gc_shader_pipe_config;
u32 gfx = grbm_gfx_index | SE_INDEX(i);
 
if (i == num_shader_engines) {
rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
}
 
WREG32(GRBM_GFX_INDEX, gfx);
WREG32(RLC_GFX_INDEX, gfx);
 
WREG32(CC_RB_BACKEND_DISABLE, rb);
WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
}
 
grbm_gfx_index |= SE_BROADCAST_WRITES;
WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
WREG32(RLC_GFX_INDEX, grbm_gfx_index);
 
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
 
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
ROQ_IB2_START(0x2b)));
 
WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
 
WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
SYNC_GRADIENT |
SYNC_WALKER |
SYNC_ALIGNER));
 
sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
WREG32(SX_DEBUG_1, sx_debug_1);
 
 
smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
 
WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
 
WREG32(VGT_NUM_INSTANCES, 1);
WREG32(SPI_CONFIG_CNTL, 0);
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
WREG32(CP_PERFMON_CNTL, 0);
 
WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
FETCH_FIFO_HIWATER(0x4) |
DONE_FIFO_HIWATER(0xe0) |
ALU_UPDATE_FIFO_HIWATER(0x8)));
 
sq_config = RREG32(SQ_CONFIG);
sq_config &= ~(PS_PRIO(3) |
VS_PRIO(3) |
GS_PRIO(3) |
ES_PRIO(3));
sq_config |= (VC_ENABLE |
EXPORT_SRC_C |
PS_PRIO(0) |
VS_PRIO(1) |
GS_PRIO(2) |
ES_PRIO(3));
 
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_CAICOS:
/* no vertex cache */
sq_config &= ~VC_ENABLE;
break;
default:
break;
}
 
sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
 
sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
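/* Worked example (illustrative): with max_gprs = 256, the pool left
 * after the 2 * 4 clause-temp reservation is 248, split in the ratio
 * 12:6:4:4:3:3 (out of 32), i.e. PS 93, VS 46, GS 31, ES 31, HS 23 and
 * LS 23 GPRs.
 */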
 
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
ps_thread_count = 96;
break;
default:
ps_thread_count = 128;
break;
}
 
sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
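/* Worked example (illustrative): with max_threads = 248 and
 * ps_thread_count = 128, the remaining 120 threads are split six ways
 * (20 each) and rounded down to a multiple of 8, so VS, GS, ES, HS and
 * LS each get 16 threads.
 */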
 
sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
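/* Likewise each of the six shader types gets max_stack_entries / 6
 * stack entries, e.g. 512 / 6 = 85 on Cypress/Hemlock.
 */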
 
WREG32(SQ_CONFIG, sq_config);
WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
 
WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
FORCE_EOV_MAX_REZ_CNT(255)));
 
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_CAICOS:
vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
break;
default:
vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
break;
}
vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
 
WREG32(VGT_GS_VERTEX_REUSE, 16);
WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
 
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
WREG32(VGT_OUT_DEALLOC_CNTL, 16);
 
WREG32(CB_PERF_CTR0_SEL_0, 0);
WREG32(CB_PERF_CTR0_SEL_1, 0);
WREG32(CB_PERF_CTR1_SEL_0, 0);
WREG32(CB_PERF_CTR1_SEL_1, 0);
WREG32(CB_PERF_CTR2_SEL_0, 0);
WREG32(CB_PERF_CTR2_SEL_1, 0);
WREG32(CB_PERF_CTR3_SEL_0, 0);
WREG32(CB_PERF_CTR3_SEL_1, 0);
 
/* clear render buffer base addresses */
WREG32(CB_COLOR0_BASE, 0);
WREG32(CB_COLOR1_BASE, 0);
WREG32(CB_COLOR2_BASE, 0);
WREG32(CB_COLOR3_BASE, 0);
WREG32(CB_COLOR4_BASE, 0);
WREG32(CB_COLOR5_BASE, 0);
WREG32(CB_COLOR6_BASE, 0);
WREG32(CB_COLOR7_BASE, 0);
WREG32(CB_COLOR8_BASE, 0);
WREG32(CB_COLOR9_BASE, 0);
WREG32(CB_COLOR10_BASE, 0);
WREG32(CB_COLOR11_BASE, 0);
 
/* set the shader const cache sizes to 0 */
for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
WREG32(i, 0);
for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
WREG32(i, 0);
 
tmp = RREG32(HDP_MISC_CNTL);
tmp |= HDP_FLUSH_INVALIDATE_CACHE;
WREG32(HDP_MISC_CNTL, tmp);
 
hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
 
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
 
udelay(50);
 
}
 
int evergreen_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
int chansize, numchan;
 
468,37 → 1620,159
}
rdev->mc.vram_width = numchan * chansize;
/* Could the aperture size report 0? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
if (rdev->flags & RADEON_IS_IGP) {
/* size in bytes on fusion */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
} else {
/* size in MB on evergreen */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
}
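/* Worked example (illustrative): a discrete board reporting
 * CONFIG_MEMSIZE = 0x400 has 1024 MB, i.e. mc_vram_size = 0x40000000
 * bytes, whereas a Fusion IGP reports the byte count directly.
 */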
rdev->mc.visible_vram_size = rdev->mc.aper_size;
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce the default clock in case the GPU is not in
 * its default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
 
return 0;
}
 
int evergreen_gpu_reset(struct radeon_device *rdev)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
/* FIXME: implement for evergreen */
return false;
}
 
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 grbm_reset = 0;
 
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
return 0;
 
dev_info(rdev->dev, "GPU softreset \n");
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
RREG32(GRBM_STATUS_SE0));
dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
 
/* reset all the gfx blocks */
grbm_reset = (SOFT_RESET_CP |
SOFT_RESET_CB |
SOFT_RESET_DB |
SOFT_RESET_PA |
SOFT_RESET_SC |
SOFT_RESET_SPI |
SOFT_RESET_SH |
SOFT_RESET_SX |
SOFT_RESET_TC |
SOFT_RESET_TA |
SOFT_RESET_VC |
SOFT_RESET_VGT);
 
dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
WREG32(GRBM_SOFT_RESET, grbm_reset);
(void)RREG32(GRBM_SOFT_RESET);
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
RREG32(GRBM_STATUS_SE0));
dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
evergreen_mc_resume(rdev, &save);
return 0;
}
 
int evergreen_asic_reset(struct radeon_device *rdev)
{
return evergreen_gpu_soft_reset(rdev);
}
 
/* Interrupts */
 
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
switch (crtc) {
case 0:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
case 1:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
case 2:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
case 3:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
case 4:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
case 5:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
default:
return 0;
}
}
 
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
 
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (!(rdev->flags & RADEON_IS_IGP)) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (!(rdev->flags & RADEON_IS_IGP)) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
 
tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD2_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD3_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD4_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
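/* Note (editorial): each read-modify-write above keeps only the
 * DC_HPDx_INT_POLARITY bit, so the enable and ack bits in
 * DC_HPDx_INT_CONTROL are cleared while the configured polarity
 * survives the disable.
 */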
 
}
static int evergreen_startup(struct radeon_device *rdev)
{
#if 0
int r;
 
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
508,20 → 1782,39
return r;
}
}
#endif
 
evergreen_mc_program(rdev);
#if 0
if (rdev->flags & RADEON_IS_AGP) {
evergreem_agp_enable(rdev);
evergreen_agp_enable(rdev);
} else {
r = evergreen_pcie_gart_enable(rdev);
if (r)
return r;
}
#endif
evergreen_gpu_init(rdev);
#if 0
r = evergreen_blit_init(rdev);
if (r) {
evergreen_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
#endif
 
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
radeon_irq_kms_fini(rdev);
return r;
}
// evergreen_irq_set(rdev);
 
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
528,12 → 1821,12
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
r = r600_cp_resume(rdev);
r = evergreen_cp_resume(rdev);
if (r)
return r;
/* the write back buffer is not vital, so don't worry about failure */
r600_wb_enable(rdev);
#endif
 
return 0;
}
 
547,11 → 1840,6
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
/* Initialize clocks */
r = radeon_clocks_init(rdev);
if (r) {
return r;
}
 
r = evergreen_startup(rdev);
if (r) {
571,7 → 1859,6
 
int evergreen_suspend(struct radeon_device *rdev)
{
#if 0
int r;
 
/* FIXME: we should wait for the ring to be empty */
579,6 → 1866,7
rdev->cp.ready = false;
r600_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
#if 0
/* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) {
655,12 → 1943,10
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev);
if (r)
return r;
/* Initialize power management */
radeon_pm_init(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
// if (r)
// return r;
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
675,7 → 1961,7
r = radeon_bo_init(rdev);
if (r)
return r;
#if 0
 
r = radeon_irq_kms_init(rdev);
if (r)
return r;
683,38 → 1969,52
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
// rdev->ih.ring_obj = NULL;
// r600_ih_ring_init(rdev, 64 * 1024);
 
r = r600_pcie_gart_init(rdev);
if (r)
return r;
#endif
rdev->accel_working = false;
 
rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
evergreen_suspend(rdev);
/*r600_wb_fini(rdev);*/
/*radeon_ring_fini(rdev);*/
/*evergreen_pcie_gart_fini(rdev);*/
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
return 0;
}
 
void evergreen_fini(struct radeon_device *rdev)
{
evergreen_suspend(rdev);
#if 0
r600_blit_fini(rdev);
/*r600_blit_fini(rdev);*/
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
evergreen_pcie_gart_fini(rdev);
#endif
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}
/drivers/video/drm/radeon/evergreen_reg.h
61,6 → 61,11
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
# define EVERGREEN_GRPH_ENDIAN_NONE 0
100,6 → 105,11
#define EVERGREEN_GRPH_Y_START 0x6830
#define EVERGREEN_GRPH_X_END 0x6834
#define EVERGREEN_GRPH_Y_END 0x6838
#define EVERGREEN_GRPH_UPDATE 0x6844
# define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
# define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
#define EVERGREEN_GRPH_FLIP_CONTROL 0x6848
# define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
 
/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
#define EVERGREEN_CUR_CONTROL 0x6998
151,6 → 161,9
#define EVERGREEN_DATA_FORMAT 0x6b00
# define EVERGREEN_INTERLEAVE_EN (1 << 0)
#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
#define EVERGREEN_VLINE_START_END 0x6b08
#define EVERGREEN_VLINE_STATUS 0x6bb8
# define EVERGREEN_VLINE_STAT (1 << 12)
 
#define EVERGREEN_VIEWPORT_START 0x6d70
#define EVERGREEN_VIEWPORT_SIZE 0x6d74
164,8 → 177,13
#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
 
/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_STATUS 0x6e8c
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
 
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
/drivers/video/drm/radeon/evergreend.h
0,0 → 1,1128
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#ifndef EVERGREEND_H
#define EVERGREEND_H
 
#define EVERGREEN_MAX_SH_GPRS 256
#define EVERGREEN_MAX_TEMP_GPRS 16
#define EVERGREEN_MAX_SH_THREADS 256
#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
#define EVERGREEN_MAX_FRC_EOV_CNT 16384
#define EVERGREEN_MAX_BACKENDS 8
#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
#define EVERGREEN_MAX_SIMDS 16
#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
#define EVERGREEN_MAX_PIPES 8
#define EVERGREEN_MAX_PIPES_MASK 0xFF
#define EVERGREEN_MAX_LDS_NUM 0xFFFF
 
/* Registers */
 
#define RCU_IND_INDEX 0x100
#define RCU_IND_DATA 0x104
 
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
#define INSTANCE_BROADCAST_WRITES (1 << 30)
#define SE_BROADCAST_WRITES (1 << 31)
#define RLC_GFX_INDEX 0x3fC4
#define CC_GC_SHADER_PIPE_CONFIG 0x8950
#define WRITE_DIS (1 << 0)
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0)
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
#define NUM_SHADER_ENGINES(x) ((x) << 12)
#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
#define NUM_GPUS(x) ((x) << 20)
#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
#define ROW_SIZE(x) ((x) << 28)
#define GB_BACKEND_MAP 0x98FC
#define DMIF_ADDR_CONFIG 0xBD4
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
 
#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
 
#define CGTS_SYS_TCC_DISABLE 0x3F90
#define CGTS_TCC_DISABLE 0x9148
#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
#define CGTS_USER_TCC_DISABLE 0x914C
 
#define CONFIG_MEMSIZE 0x5428
 
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
#define CP_ME_RAM_DATA 0xC160
#define CP_ME_RAM_RADDR 0xC158
#define CP_ME_RAM_WADDR 0xC15C
#define CP_MEQ_THRESHOLDS 0x8764
#define STQ_SPLIT(x) ((x) << 0)
#define CP_PERFMON_CNTL 0x87FC
#define CP_PFP_UCODE_ADDR 0xC150
#define CP_PFP_UCODE_DATA 0xC154
#define CP_QUEUE_THRESHOLDS 0x8760
#define ROQ_IB1_START(x) ((x) << 0)
#define ROQ_IB2_START(x) ((x) << 8)
#define CP_RB_BASE 0xC100
#define CP_RB_CNTL 0xC104
#define RB_BUFSZ(x) ((x) << 0)
#define RB_BLKSZ(x) ((x) << 8)
#define RB_NO_UPDATE (1 << 27)
#define RB_RPTR_WR_ENA (1 << 31)
#define BUF_SWAP_32BIT (2 << 16)
#define CP_RB_RPTR 0x8700
#define CP_RB_RPTR_ADDR 0xC10C
#define RB_RPTR_SWAP(x) ((x) << 0)
#define CP_RB_RPTR_ADDR_HI 0xC110
#define CP_RB_RPTR_WR 0xC108
#define CP_RB_WPTR 0xC114
#define CP_RB_WPTR_ADDR 0xC118
#define CP_RB_WPTR_ADDR_HI 0xC11C
#define CP_RB_WPTR_DELAY 0x8704
#define CP_SEM_WAIT_TIMER 0x85BC
#define CP_DEBUG 0xC1FC
 
 
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
#define INACTIVE_SIMDS(x) ((x) << 16)
#define INACTIVE_SIMDS_MASK 0x00FF0000
 
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
#define GRBM_SOFT_RESET 0x8020
#define SOFT_RESET_CP (1 << 0)
#define SOFT_RESET_CB (1 << 1)
#define SOFT_RESET_DB (1 << 3)
#define SOFT_RESET_PA (1 << 5)
#define SOFT_RESET_SC (1 << 6)
#define SOFT_RESET_SPI (1 << 8)
#define SOFT_RESET_SH (1 << 9)
#define SOFT_RESET_SX (1 << 10)
#define SOFT_RESET_TC (1 << 11)
#define SOFT_RESET_TA (1 << 12)
#define SOFT_RESET_VC (1 << 13)
#define SOFT_RESET_VGT (1 << 14)
 
#define GRBM_STATUS 0x8010
#define CMDFIFO_AVAIL_MASK 0x0000000F
#define SRBM_RQ_PENDING (1 << 5)
#define CF_RQ_PENDING (1 << 7)
#define PF_RQ_PENDING (1 << 8)
#define GRBM_EE_BUSY (1 << 10)
#define SX_CLEAN (1 << 11)
#define DB_CLEAN (1 << 12)
#define CB_CLEAN (1 << 13)
#define TA_BUSY (1 << 14)
#define VGT_BUSY_NO_DMA (1 << 16)
#define VGT_BUSY (1 << 17)
#define SX_BUSY (1 << 20)
#define SH_BUSY (1 << 21)
#define SPI_BUSY (1 << 22)
#define SC_BUSY (1 << 24)
#define PA_BUSY (1 << 25)
#define DB_BUSY (1 << 26)
#define CP_COHERENCY_BUSY (1 << 28)
#define CP_BUSY (1 << 29)
#define CB_BUSY (1 << 30)
#define GUI_ACTIVE (1 << 31)
#define GRBM_STATUS_SE0 0x8014
#define GRBM_STATUS_SE1 0x8018
#define SE_SX_CLEAN (1 << 0)
#define SE_DB_CLEAN (1 << 1)
#define SE_CB_CLEAN (1 << 2)
#define SE_TA_BUSY (1 << 25)
#define SE_SX_BUSY (1 << 26)
#define SE_SPI_BUSY (1 << 27)
#define SE_SH_BUSY (1 << 28)
#define SE_SC_BUSY (1 << 29)
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
/* evergreen */
#define CG_THERMAL_CTRL 0x72c
#define TOFFSET_MASK 0x00003FE0
#define TOFFSET_SHIFT 5
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x07FF0000
#define ASIC_T_SHIFT 16
#define CG_TS0_STATUS 0x760
#define TS0_ADC_DOUT_MASK 0x000003FF
#define TS0_ADC_DOUT_SHIFT 0
/* APU */
#define CG_THERMAL_STATUS 0x678
 
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
 
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
#define MC_SHARED_CHREMAP 0x2008
 
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
#define NOOFRANK_SHIFT 2
#define NOOFRANK_MASK 0x00000004
#define NOOFROWS_SHIFT 3
#define NOOFROWS_MASK 0x00000038
#define NOOFCOLS_SHIFT 6
#define NOOFCOLS_MASK 0x000000C0
#define CHANSIZE_SHIFT 8
#define CHANSIZE_MASK 0x00000100
#define BURSTLENGTH_SHIFT 9
#define BURSTLENGTH_MASK 0x00000200
#define CHANSIZE_OVERRIDE (1 << 11)
#define FUS_MC_ARB_RAMCFG 0x2768
#define MC_VM_AGP_TOP 0x2028
#define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030
#define MC_VM_FB_LOCATION 0x2024
#define MC_FUS_VM_FB_OFFSET 0x2898
#define MC_VM_MB_L1_TLB0_CNTL 0x2234
#define MC_VM_MB_L1_TLB1_CNTL 0x2238
#define MC_VM_MB_L1_TLB2_CNTL 0x223C
#define MC_VM_MB_L1_TLB3_CNTL 0x2240
#define ENABLE_L1_TLB (1 << 0)
#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
 
#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
#define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664
 
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
 
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
#define PA_SC_AA_CONFIG 0x28C04
#define MSAA_NUM_SAMPLES_SHIFT 0
#define MSAA_NUM_SAMPLES_MASK 0x3
#define PA_SC_CLIPRECT_RULE 0x2820C
#define PA_SC_EDGERULE 0x28230
#define PA_SC_FIFO_SIZE 0x8BCC
#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
#define PA_SC_LINE_STIPPLE 0x28A0C
#define PA_SU_LINE_STIPPLE_VALUE 0x8A60
#define PA_SC_LINE_STIPPLE_STATE 0x8B10
 
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
#define SCRATCH_REG3 0x850C
#define SCRATCH_REG4 0x8510
#define SCRATCH_REG5 0x8514
#define SCRATCH_REG6 0x8518
#define SCRATCH_REG7 0x851C
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
 
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define NUMBER_OF_SETS(x) ((x) << 1)
#define FLUSH_ALL_ON_EVENT (1 << 10)
#define STALL_ON_EVENT (1 << 11)
#define SMX_EVENT_CTL 0xA02C
#define ES_FLUSH_CTL(x) ((x) << 0)
#define GS_FLUSH_CTL(x) ((x) << 3)
#define ACK_FLUSH_CTL(x) ((x) << 6)
#define SYNC_FLUSH_CTL (1 << 8)
 
#define SPI_CONFIG_CNTL 0x9100
#define GPR_WRITE_PRIORITY(x) ((x) << 0)
#define SPI_CONFIG_CNTL_1 0x913C
#define VTX_DONE_DELAY(x) ((x) << 0)
#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
#define SPI_INPUT_Z 0x286D8
#define SPI_PS_IN_CONTROL_0 0x286CC
#define NUM_INTERP(x) ((x)<<0)
#define POSITION_ENA (1<<8)
#define POSITION_CENTROID (1<<9)
#define POSITION_ADDR(x) ((x)<<10)
#define PARAM_GEN(x) ((x)<<15)
#define PARAM_GEN_ADDR(x) ((x)<<19)
#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
#define PERSP_GRADIENT_ENA (1<<28)
#define LINEAR_GRADIENT_ENA (1<<29)
#define POSITION_SAMPLE (1<<30)
#define BARYC_AT_SAMPLE_ENA (1<<31)
 
#define SQ_CONFIG 0x8C00
#define VC_ENABLE (1 << 0)
#define EXPORT_SRC_C (1 << 1)
#define CS_PRIO(x) ((x) << 18)
#define LS_PRIO(x) ((x) << 20)
#define HS_PRIO(x) ((x) << 22)
#define PS_PRIO(x) ((x) << 24)
#define VS_PRIO(x) ((x) << 26)
#define GS_PRIO(x) ((x) << 28)
#define ES_PRIO(x) ((x) << 30)
#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
#define NUM_PS_GPRS(x) ((x) << 0)
#define NUM_VS_GPRS(x) ((x) << 16)
#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
#define NUM_GS_GPRS(x) ((x) << 0)
#define NUM_ES_GPRS(x) ((x) << 16)
#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
#define NUM_HS_GPRS(x) ((x) << 0)
#define NUM_LS_GPRS(x) ((x) << 16)
#define SQ_THREAD_RESOURCE_MGMT 0x8C18
#define NUM_PS_THREADS(x) ((x) << 0)
#define NUM_VS_THREADS(x) ((x) << 8)
#define NUM_GS_THREADS(x) ((x) << 16)
#define NUM_ES_THREADS(x) ((x) << 24)
#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
#define NUM_HS_THREADS(x) ((x) << 0)
#define NUM_LS_THREADS(x) ((x) << 8)
#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
#define SQ_LDS_RESOURCE_MGMT 0x8E2C
 
#define SQ_MS_FIFO_SIZES 0x8CF0
#define CACHE_FIFO_SIZE(x) ((x) << 0)
#define FETCH_FIFO_HIWATER(x) ((x) << 8)
#define DONE_FIFO_HIWATER(x) ((x) << 16)
#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
 
#define SX_DEBUG_1 0x9058
#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
#define SX_EXPORT_BUFFER_SIZES 0x900C
#define COLOR_BUFFER_SIZE(x) ((x) << 0)
#define POSITION_BUFFER_SIZE(x) ((x) << 8)
#define SMX_BUFFER_SIZE(x) ((x) << 16)
#define SX_MISC 0x28350
 
#define CB_PERF_CTR0_SEL_0 0x9A20
#define CB_PERF_CTR0_SEL_1 0x9A24
#define CB_PERF_CTR1_SEL_0 0x9A28
#define CB_PERF_CTR1_SEL_1 0x9A2C
#define CB_PERF_CTR2_SEL_0 0x9A30
#define CB_PERF_CTR2_SEL_1 0x9A34
#define CB_PERF_CTR3_SEL_0 0x9A38
#define CB_PERF_CTR3_SEL_1 0x9A3C
 
#define TA_CNTL_AUX 0x9508
#define DISABLE_CUBE_WRAP (1 << 0)
#define DISABLE_CUBE_ANISO (1 << 1)
#define SYNC_GRADIENT (1 << 24)
#define SYNC_WALKER (1 << 25)
#define SYNC_ALIGNER (1 << 26)
 
#define TCP_CHAN_STEER_LO 0x960c
#define TCP_CHAN_STEER_HI 0x9610
 
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x) << 0)
#define VC_ONLY 0
#define TC_ONLY 1
#define VC_AND_TC 2
#define AUTO_INVLD_EN(x) ((x) << 6)
#define NO_AUTO 0
#define ES_AUTO 1
#define GS_AUTO 2
#define ES_AND_GS_AUTO 3
#define VGT_GS_VERTEX_REUSE 0x88D4
#define VGT_NUM_INSTANCES 0x8974
#define VGT_OUT_DEALLOC_CNTL 0x28C5C
#define DEALLOC_DIST_MASK 0x0000007F
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF
 
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
#define RESPONSE_TYPE_SHIFT 4
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
#define VM_L2_CNTL2 0x1404
#define INVALIDATE_ALL_L1_TLBS (1 << 0)
#define INVALIDATE_L2_CACHE (1 << 1)
#define VM_L2_CNTL3 0x1408
#define BANK_SELECT(x) ((x) << 0)
#define CACHE_UPDATE_MODE(x) ((x) << 6)
#define VM_L2_STATUS 0x140C
#define L2_BUSY (1 << 0)
 
#define WAIT_UNTIL 0x8040
 
#define SRBM_STATUS 0x0E50
#define SRBM_SOFT_RESET 0x0E60
#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
#define SOFT_RESET_BIF (1 << 1)
#define SOFT_RESET_CG (1 << 2)
#define SOFT_RESET_DC (1 << 5)
#define SOFT_RESET_GRBM (1 << 8)
#define SOFT_RESET_HDP (1 << 9)
#define SOFT_RESET_IH (1 << 10)
#define SOFT_RESET_MC (1 << 11)
#define SOFT_RESET_RLC (1 << 13)
#define SOFT_RESET_ROM (1 << 14)
#define SOFT_RESET_SEM (1 << 15)
#define SOFT_RESET_VMC (1 << 17)
#define SOFT_RESET_TST (1 << 21)
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
 
/* display watermarks */
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define PRIORITY_A_CNT 0x6b18
#define PRIORITY_MARK_MASK 0x7fff
#define PRIORITY_OFF (1 << 16)
#define PRIORITY_ALWAYS_ON (1 << 20)
#define PRIORITY_B_CNT 0x6b1c
#define PIPE0_ARBITRATION_CONTROL3 0x0bf0
# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
#define PIPE0_LATENCY_CONTROL 0x0bf4
# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
 
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
#define IH_RB_BASE 0x3e04
#define IH_RB_RPTR 0x3e08
#define IH_RB_WPTR 0x3e0c
# define RB_OVERFLOW (1 << 0)
# define WPTR_OFFSET_MASK 0x3fffc
#define IH_RB_WPTR_ADDR_HI 0x3e10
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
# define IH_MC_SWAP(x) ((x) << 2)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
# define IH_MC_SWAP_64BIT 3
# define RPTR_REARM (1 << 4)
# define MC_WRREQ_CREDIT(x) ((x) << 15)
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
 
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
# define SCRATCH_INT_ENABLE (1 << 25)
# define TIME_STAMP_INT_ENABLE (1 << 26)
# define IB2_INT_ENABLE (1 << 29)
# define IB1_INT_ENABLE (1 << 30)
# define RB_INT_ENABLE (1 << 31)
#define CP_INT_STATUS 0xc128
# define SCRATCH_INT_STAT (1 << 25)
# define TIME_STAMP_INT_STAT (1 << 26)
# define IB2_INT_STAT (1 << 29)
# define IB1_INT_STAT (1 << 30)
# define RB_INT_STAT (1 << 31)
 
#define GRBM_INT_CNTL 0x8060
# define RDERR_INT_ENABLE (1 << 0)
# define GUI_IDLE_INT_ENABLE (1 << 19)
 
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
 
/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
#define VLINE_STATUS 0x6bb8
# define VLINE_OCCURRED (1 << 0)
# define VLINE_ACK (1 << 4)
# define VLINE_STAT (1 << 12)
# define VLINE_INTERRUPT (1 << 16)
# define VLINE_INTERRUPT_TYPE (1 << 17)
/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
#define VBLANK_STATUS 0x6bbc
# define VBLANK_OCCURRED (1 << 0)
# define VBLANK_ACK (1 << 4)
# define VBLANK_STAT (1 << 12)
# define VBLANK_INTERRUPT (1 << 16)
# define VBLANK_INTERRUPT_TYPE (1 << 17)
 
/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
#define INT_MASK 0x6b40
# define VBLANK_INT_MASK (1 << 0)
# define VLINE_INT_MASK (1 << 4)
 
#define DISP_INTERRUPT_STATUS 0x60f4
# define LB_D1_VLINE_INTERRUPT (1 << 2)
# define LB_D1_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD1_INTERRUPT (1 << 17)
# define DC_HPD1_RX_INTERRUPT (1 << 18)
# define DACA_AUTODETECT_INTERRUPT (1 << 22)
# define DACB_AUTODETECT_INTERRUPT (1 << 23)
# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
# define LB_D2_VLINE_INTERRUPT (1 << 2)
# define LB_D2_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD2_INTERRUPT (1 << 17)
# define DC_HPD2_RX_INTERRUPT (1 << 18)
# define DISP_TIMER_INTERRUPT (1 << 24)
#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
# define LB_D3_VLINE_INTERRUPT (1 << 2)
# define LB_D3_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD3_INTERRUPT (1 << 17)
# define DC_HPD3_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
# define LB_D4_VLINE_INTERRUPT (1 << 2)
# define LB_D4_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD4_INTERRUPT (1 << 17)
# define DC_HPD4_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
# define LB_D5_VLINE_INTERRUPT (1 << 2)
# define LB_D5_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD5_INTERRUPT (1 << 17)
# define DC_HPD5_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050
# define LB_D6_VLINE_INTERRUPT (1 << 2)
# define LB_D6_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD6_INTERRUPT (1 << 17)
# define DC_HPD6_RX_INTERRUPT (1 << 18)
 
/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
#define GRPH_INT_STATUS 0x6858
# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
# define GRPH_PFLIP_INT_CLEAR (1 << 8)
/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
#define GRPH_INT_CONTROL 0x685c
# define GRPH_PFLIP_INT_MASK (1 << 0)
# define GRPH_PFLIP_INT_TYPE (1 << 8)
 
#define DACA_AUTODETECT_INT_CONTROL 0x66c8
#define DACB_AUTODETECT_INT_CONTROL 0x67c8
 
#define DC_HPD1_INT_STATUS 0x601c
#define DC_HPD2_INT_STATUS 0x6028
#define DC_HPD3_INT_STATUS 0x6034
#define DC_HPD4_INT_STATUS 0x6040
#define DC_HPD5_INT_STATUS 0x604c
#define DC_HPD6_INT_STATUS 0x6058
# define DC_HPDx_INT_STATUS (1 << 0)
# define DC_HPDx_SENSE (1 << 1)
# define DC_HPDx_RX_INT_STATUS (1 << 8)
 
#define DC_HPD1_INT_CONTROL 0x6020
#define DC_HPD2_INT_CONTROL 0x602c
#define DC_HPD3_INT_CONTROL 0x6038
#define DC_HPD4_INT_CONTROL 0x6044
#define DC_HPD5_INT_CONTROL 0x6050
#define DC_HPD6_INT_CONTROL 0x605c
# define DC_HPDx_INT_ACK (1 << 0)
# define DC_HPDx_INT_POLARITY (1 << 8)
# define DC_HPDx_INT_EN (1 << 16)
# define DC_HPDx_RX_INT_ACK (1 << 20)
# define DC_HPDx_RX_INT_EN (1 << 24)
 
#define DC_HPD1_CONTROL 0x6024
#define DC_HPD2_CONTROL 0x6030
#define DC_HPD3_CONTROL 0x603c
#define DC_HPD4_CONTROL 0x6048
#define DC_HPD5_CONTROL 0x6054
#define DC_HPD6_CONTROL 0x6060
# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
 
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_SHIFT 0
# define LC_LINK_WIDTH_MASK 0x7
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
# define LC_LINK_WIDTH_RD_SHIFT 4
# define LC_LINK_WIDTH_RD_MASK 0x70
# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
# define LC_RECONFIG_NOW (1 << 8)
# define LC_RENEGOTIATION_SUPPORT (1 << 9)
# define LC_RENEGOTIATE_EN (1 << 10)
# define LC_SHORT_RECONFIG_EN (1 << 11)
# define LC_UPCONFIGURE_SUPPORT (1 << 12)
# define LC_UPCONFIGURE_DIS (1 << 13)
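
/*
 * Illustrative sketch (not part of the original header): requesting a
 * link width renegotiation with the fields above. These are PCIE-port
 * registers, not plain MMIO; RREG32_PCIE_P()/WREG32_PCIE_P() are
 * assumed here to be the indirect accessors of this driver family.
 */
static void example_set_link_width_x8(struct radeon_device *rdev)
{
	u32 tmp = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);

	tmp &= ~LC_LINK_WIDTH_MASK;
	tmp |= LC_LINK_WIDTH_X8 << LC_LINK_WIDTH_SHIFT;
	tmp |= LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
	WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, tmp);
}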
#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
# define LC_GEN2_EN_STRAP (1 << 0)
# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
# define LC_CURRENT_DATA_RATE (1 << 11)
# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
#define MM_CFGREGS_CNTL 0x544c
# define MM_WR_TO_CFG_EN (1 << 3)
#define LINK_CNTL2 0x88 /* F0 */
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
 
/*
* PM4
*/
#define PACKET_TYPE0 0
#define PACKET_TYPE1 1
#define PACKET_TYPE2 2
#define PACKET_TYPE3 3
 
#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
#define PACKET2_PAD_SHIFT 0
#define PACKET2_PAD_MASK (0x3fffffff << 0)
 
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
 
#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
 
/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
#define PACKET3_CLEAR_STATE 0x12
#define PACKET3_INDEX_BUFFER_SIZE 0x13
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
#define PACKET3_MODE_CONTROL 0x18
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
#define PACKET3_PRED_EXEC 0x23
#define PACKET3_DRAW_INDIRECT 0x24
#define PACKET3_DRAW_INDEX_INDIRECT 0x25
#define PACKET3_INDEX_BASE 0x26
#define PACKET3_DRAW_INDEX_2 0x27
#define PACKET3_CONTEXT_CONTROL 0x28
#define PACKET3_DRAW_INDEX_OFFSET 0x29
#define PACKET3_INDEX_TYPE 0x2A
#define PACKET3_DRAW_INDEX 0x2B
#define PACKET3_DRAW_INDEX_AUTO 0x2D
#define PACKET3_DRAW_INDEX_IMMD 0x2E
#define PACKET3_NUM_INSTANCES 0x2F
#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
# define PACKET3_FULL_CACHE_ENA (1 << 20)
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
# define PACKET3_SX_ACTION_ENA (1 << 28)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
#define PACKET3_EVENT_WRITE_EOP 0x47
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
#define PACKET3_RB_OFFSET 0x4B
#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
#define PACKET3_ONE_REG_WRITE 0x57
#define PACKET3_SET_CONFIG_REG 0x68
#define PACKET3_SET_CONFIG_REG_START 0x00008000
#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
#define PACKET3_SET_CONTEXT_REG 0x69
#define PACKET3_SET_CONTEXT_REG_START 0x00028000
#define PACKET3_SET_CONTEXT_REG_END 0x00029000
#define PACKET3_SET_ALU_CONST 0x6A
/* alu const buffers only; no reg file */
#define PACKET3_SET_BOOL_CONST 0x6B
#define PACKET3_SET_BOOL_CONST_START 0x0003a500
#define PACKET3_SET_BOOL_CONST_END 0x0003a518
#define PACKET3_SET_LOOP_CONST 0x6C
#define PACKET3_SET_LOOP_CONST_START 0x0003a200
#define PACKET3_SET_LOOP_CONST_END 0x0003a500
#define PACKET3_SET_RESOURCE 0x6D
#define PACKET3_SET_RESOURCE_START 0x00030000
#define PACKET3_SET_RESOURCE_END 0x00038000
#define PACKET3_SET_SAMPLER 0x6E
#define PACKET3_SET_SAMPLER_START 0x0003c000
#define PACKET3_SET_SAMPLER_END 0x0003c600
#define PACKET3_SET_CTL_CONST 0x6F
#define PACKET3_SET_CTL_CONST_START 0x0003cff0
#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
#define PACKET3_SET_RESOURCE_OFFSET 0x70
#define PACKET3_SET_ALU_CONST_VS 0x71
#define PACKET3_SET_ALU_CONST_DI 0x72
#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
#define PACKET3_SET_RESOURCE_INDIRECT 0x74
#define PACKET3_SET_APPEND_CNT 0x75
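
/*
 * Illustrative sketch (not part of the original header): composing and
 * decoding PM4 headers with the macros above. A type-0 packet writes
 * (n + 1) consecutive registers starting at 'reg'; a type-3 packet
 * carries an opcode and (n + 1) data dwords.
 */
static inline void example_pm4_headers(void)
{
	u32 hdr;

	/* type-0 header: write 2 dwords starting at register 0x8c40 */
	hdr = PACKET0(0x8c40, 1);
	/* CP_PACKET_GET_TYPE(hdr)  == PACKET_TYPE0
	 * CP_PACKET0_GET_REG(hdr)  == 0x8c40
	 * CP_PACKET_GET_COUNT(hdr) == 1
	 */

	/* type-3 header: SURFACE_SYNC with 4 data dwords (count = 3) */
	hdr = PACKET3(PACKET3_SURFACE_SYNC, 3);
	/* CP_PACKET3_GET_OPCODE(hdr) == PACKET3_SURFACE_SYNC */

	(void)hdr;
}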
 
#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c
#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30)
#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3)
#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
#define SQ_TEX_VTX_INVALID_BUFFER 0x1
#define SQ_TEX_VTX_VALID_TEXTURE 0x2
#define SQ_TEX_VTX_VALID_BUFFER 0x3
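
/*
 * Illustrative sketch (not part of the original header): the S__/G__
 * macros above pack and unpack the constant-type field in bits 31:30
 * of resource word 7.
 */
static inline u32 example_mark_texture_valid(u32 word7)
{
	word7 &= ~S__SQ_CONSTANT_TYPE(3);	/* clear bits 31:30 */
	word7 |= S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
	return word7;	/* G__SQ_CONSTANT_TYPE() now yields 0x2 */
}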
 
#define SQ_CONST_MEM_BASE 0x8df8
 
#define SQ_ESGS_RING_BASE 0x8c40
#define SQ_ESGS_RING_SIZE 0x8c44
#define SQ_GSVS_RING_BASE 0x8c48
#define SQ_GSVS_RING_SIZE 0x8c4c
#define SQ_ESTMP_RING_BASE 0x8c50
#define SQ_ESTMP_RING_SIZE 0x8c54
#define SQ_GSTMP_RING_BASE 0x8c58
#define SQ_GSTMP_RING_SIZE 0x8c5c
#define SQ_VSTMP_RING_BASE 0x8c60
#define SQ_VSTMP_RING_SIZE 0x8c64
#define SQ_PSTMP_RING_BASE 0x8c68
#define SQ_PSTMP_RING_SIZE 0x8c6c
#define SQ_LSTMP_RING_BASE 0x8e10
#define SQ_LSTMP_RING_SIZE 0x8e14
#define SQ_HSTMP_RING_BASE 0x8e18
#define SQ_HSTMP_RING_SIZE 0x8e1c
#define VGT_TF_RING_SIZE 0x8988
 
#define SQ_ESGS_RING_ITEMSIZE 0x28900
#define SQ_GSVS_RING_ITEMSIZE 0x28904
#define SQ_ESTMP_RING_ITEMSIZE 0x28908
#define SQ_GSTMP_RING_ITEMSIZE 0x2890c
#define SQ_VSTMP_RING_ITEMSIZE 0x28910
#define SQ_PSTMP_RING_ITEMSIZE 0x28914
#define SQ_LSTMP_RING_ITEMSIZE 0x28830
#define SQ_HSTMP_RING_ITEMSIZE 0x28834
 
#define SQ_GS_VERT_ITEMSIZE 0x2891c
#define SQ_GS_VERT_ITEMSIZE_1 0x28920
#define SQ_GS_VERT_ITEMSIZE_2 0x28924
#define SQ_GS_VERT_ITEMSIZE_3 0x28928
#define SQ_GSVS_RING_OFFSET_1 0x2892c
#define SQ_GSVS_RING_OFFSET_2 0x28930
#define SQ_GSVS_RING_OFFSET_3 0x28934
 
#define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140
#define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80
 
#define SQ_ALU_CONST_CACHE_PS_0 0x28940
#define SQ_ALU_CONST_CACHE_PS_1 0x28944
#define SQ_ALU_CONST_CACHE_PS_2 0x28948
#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
#define SQ_ALU_CONST_CACHE_PS_4 0x28950
#define SQ_ALU_CONST_CACHE_PS_5 0x28954
#define SQ_ALU_CONST_CACHE_PS_6 0x28958
#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
#define SQ_ALU_CONST_CACHE_PS_8 0x28960
#define SQ_ALU_CONST_CACHE_PS_9 0x28964
#define SQ_ALU_CONST_CACHE_PS_10 0x28968
#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
#define SQ_ALU_CONST_CACHE_PS_12 0x28970
#define SQ_ALU_CONST_CACHE_PS_13 0x28974
#define SQ_ALU_CONST_CACHE_PS_14 0x28978
#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
#define SQ_ALU_CONST_CACHE_VS_0 0x28980
#define SQ_ALU_CONST_CACHE_VS_1 0x28984
#define SQ_ALU_CONST_CACHE_VS_2 0x28988
#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
#define SQ_ALU_CONST_CACHE_VS_4 0x28990
#define SQ_ALU_CONST_CACHE_VS_5 0x28994
#define SQ_ALU_CONST_CACHE_VS_6 0x28998
#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
#define SQ_ALU_CONST_CACHE_HS_0 0x28f00
#define SQ_ALU_CONST_CACHE_HS_1 0x28f04
#define SQ_ALU_CONST_CACHE_HS_2 0x28f08
#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c
#define SQ_ALU_CONST_CACHE_HS_4 0x28f10
#define SQ_ALU_CONST_CACHE_HS_5 0x28f14
#define SQ_ALU_CONST_CACHE_HS_6 0x28f18
#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c
#define SQ_ALU_CONST_CACHE_HS_8 0x28f20
#define SQ_ALU_CONST_CACHE_HS_9 0x28f24
#define SQ_ALU_CONST_CACHE_HS_10 0x28f28
#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c
#define SQ_ALU_CONST_CACHE_HS_12 0x28f30
#define SQ_ALU_CONST_CACHE_HS_13 0x28f34
#define SQ_ALU_CONST_CACHE_HS_14 0x28f38
#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c
#define SQ_ALU_CONST_CACHE_LS_0 0x28f40
#define SQ_ALU_CONST_CACHE_LS_1 0x28f44
#define SQ_ALU_CONST_CACHE_LS_2 0x28f48
#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c
#define SQ_ALU_CONST_CACHE_LS_4 0x28f50
#define SQ_ALU_CONST_CACHE_LS_5 0x28f54
#define SQ_ALU_CONST_CACHE_LS_6 0x28f58
#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c
#define SQ_ALU_CONST_CACHE_LS_8 0x28f60
#define SQ_ALU_CONST_CACHE_LS_9 0x28f64
#define SQ_ALU_CONST_CACHE_LS_10 0x28f68
#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c
#define SQ_ALU_CONST_CACHE_LS_12 0x28f70
#define SQ_ALU_CONST_CACHE_LS_13 0x28f74
#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
 
#define PA_SC_SCREEN_SCISSOR_TL 0x28030
#define PA_SC_GENERIC_SCISSOR_TL 0x28240
#define PA_SC_WINDOW_SCISSOR_TL 0x28204
#define VGT_PRIMITIVE_TYPE 0x8958
 
#define DB_DEPTH_CONTROL 0x28800
#define DB_DEPTH_VIEW 0x28008
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
#define DB_STENCIL_INFO 0x28044
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
#define DB_Z_WRITE_BASE 0x28050
#define DB_STENCIL_WRITE_BASE 0x28054
#define DB_DEPTH_SIZE 0x28058
 
#define SQ_PGM_START_PS 0x28840
#define SQ_PGM_START_VS 0x2885c
#define SQ_PGM_START_GS 0x28874
#define SQ_PGM_START_ES 0x2888c
#define SQ_PGM_START_FS 0x288a4
#define SQ_PGM_START_HS 0x288b8
#define SQ_PGM_START_LS 0x288d0
 
#define VGT_STRMOUT_CONFIG 0x28b94
#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
 
#define CB_TARGET_MASK 0x28238
#define CB_SHADER_MASK 0x2823c
 
#define GDS_ADDR_BASE 0x28720
 
#define CB_IMMED0_BASE 0x28b9c
#define CB_IMMED1_BASE 0x28ba0
#define CB_IMMED2_BASE 0x28ba4
#define CB_IMMED3_BASE 0x28ba8
#define CB_IMMED4_BASE 0x28bac
#define CB_IMMED5_BASE 0x28bb0
#define CB_IMMED6_BASE 0x28bb4
#define CB_IMMED7_BASE 0x28bb8
#define CB_IMMED8_BASE 0x28bbc
#define CB_IMMED9_BASE 0x28bc0
#define CB_IMMED10_BASE 0x28bc4
#define CB_IMMED11_BASE 0x28bc8
 
/* all 12 CB blocks have these regs */
#define CB_COLOR0_BASE 0x28c60
#define CB_COLOR0_PITCH 0x28c64
#define CB_COLOR0_SLICE 0x28c68
#define CB_COLOR0_VIEW 0x28c6c
#define CB_COLOR0_INFO 0x28c70
# define CB_ARRAY_MODE(x) ((x) << 8)
# define ARRAY_LINEAR_GENERAL 0
# define ARRAY_LINEAR_ALIGNED 1
# define ARRAY_1D_TILED_THIN1 2
# define ARRAY_2D_TILED_THIN1 4
#define CB_COLOR0_ATTRIB 0x28c74
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
#define CB_COLOR0_CMASK_SLICE 0x28c80
#define CB_COLOR0_FMASK 0x28c84
#define CB_COLOR0_FMASK_SLICE 0x28c88
#define CB_COLOR0_CLEAR_WORD0 0x28c8c
#define CB_COLOR0_CLEAR_WORD1 0x28c90
#define CB_COLOR0_CLEAR_WORD2 0x28c94
#define CB_COLOR0_CLEAR_WORD3 0x28c98
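
/*
 * Illustrative sketch (not part of the original header): selecting the
 * tiling layout for color buffer 0 via the CB_ARRAY_MODE field above,
 * assuming a 4-bit field at bit 8. Only the array-mode field is shown;
 * a real INFO value also carries format and number-type fields.
 */
static void example_cb0_set_2d_tiled(struct radeon_device *rdev)
{
	u32 info = RREG32(CB_COLOR0_INFO);

	info &= ~CB_ARRAY_MODE(0xf);	/* clear the array-mode field */
	info |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
	WREG32(CB_COLOR0_INFO, info);
}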
 
#define CB_COLOR1_BASE 0x28c9c
#define CB_COLOR2_BASE 0x28cd8
#define CB_COLOR3_BASE 0x28d14
#define CB_COLOR4_BASE 0x28d50
#define CB_COLOR5_BASE 0x28d8c
#define CB_COLOR6_BASE 0x28dc8
#define CB_COLOR7_BASE 0x28e04
#define CB_COLOR8_BASE 0x28e40
#define CB_COLOR9_BASE 0x28e5c
#define CB_COLOR10_BASE 0x28e78
#define CB_COLOR11_BASE 0x28e94
 
#define CB_COLOR1_PITCH 0x28ca0
#define CB_COLOR2_PITCH 0x28cdc
#define CB_COLOR3_PITCH 0x28d18
#define CB_COLOR4_PITCH 0x28d54
#define CB_COLOR5_PITCH 0x28d90
#define CB_COLOR6_PITCH 0x28dcc
#define CB_COLOR7_PITCH 0x28e08
#define CB_COLOR8_PITCH 0x28e44
#define CB_COLOR9_PITCH 0x28e60
#define CB_COLOR10_PITCH 0x28e7c
#define CB_COLOR11_PITCH 0x28e98
 
#define CB_COLOR1_SLICE 0x28ca4
#define CB_COLOR2_SLICE 0x28ce0
#define CB_COLOR3_SLICE 0x28d1c
#define CB_COLOR4_SLICE 0x28d58
#define CB_COLOR5_SLICE 0x28d94
#define CB_COLOR6_SLICE 0x28dd0
#define CB_COLOR7_SLICE 0x28e0c
#define CB_COLOR8_SLICE 0x28e48
#define CB_COLOR9_SLICE 0x28e64
#define CB_COLOR10_SLICE 0x28e80
#define CB_COLOR11_SLICE 0x28e9c
 
#define CB_COLOR1_VIEW 0x28ca8
#define CB_COLOR2_VIEW 0x28ce4
#define CB_COLOR3_VIEW 0x28d20
#define CB_COLOR4_VIEW 0x28d5c
#define CB_COLOR5_VIEW 0x28d98
#define CB_COLOR6_VIEW 0x28dd4
#define CB_COLOR7_VIEW 0x28e10
#define CB_COLOR8_VIEW 0x28e4c
#define CB_COLOR9_VIEW 0x28e68
#define CB_COLOR10_VIEW 0x28e84
#define CB_COLOR11_VIEW 0x28ea0
 
#define CB_COLOR1_INFO 0x28cac
#define CB_COLOR2_INFO 0x28ce8
#define CB_COLOR3_INFO 0x28d24
#define CB_COLOR4_INFO 0x28d60
#define CB_COLOR5_INFO 0x28d9c
#define CB_COLOR6_INFO 0x28dd8
#define CB_COLOR7_INFO 0x28e14
#define CB_COLOR8_INFO 0x28e50
#define CB_COLOR9_INFO 0x28e6c
#define CB_COLOR10_INFO 0x28e88
#define CB_COLOR11_INFO 0x28ea4
 
#define CB_COLOR1_ATTRIB 0x28cb0
#define CB_COLOR2_ATTRIB 0x28cec
#define CB_COLOR3_ATTRIB 0x28d28
#define CB_COLOR4_ATTRIB 0x28d64
#define CB_COLOR5_ATTRIB 0x28da0
#define CB_COLOR6_ATTRIB 0x28ddc
#define CB_COLOR7_ATTRIB 0x28e18
#define CB_COLOR8_ATTRIB 0x28e54
#define CB_COLOR9_ATTRIB 0x28e70
#define CB_COLOR10_ATTRIB 0x28e8c
#define CB_COLOR11_ATTRIB 0x28ea8
 
#define CB_COLOR1_DIM 0x28cb4
#define CB_COLOR2_DIM 0x28cf0
#define CB_COLOR3_DIM 0x28d2c
#define CB_COLOR4_DIM 0x28d68
#define CB_COLOR5_DIM 0x28da4
#define CB_COLOR6_DIM 0x28de0
#define CB_COLOR7_DIM 0x28e1c
#define CB_COLOR8_DIM 0x28e58
#define CB_COLOR9_DIM 0x28e74
#define CB_COLOR10_DIM 0x28e90
#define CB_COLOR11_DIM 0x28eac
 
#define CB_COLOR1_CMASK 0x28cb8
#define CB_COLOR2_CMASK 0x28cf4
#define CB_COLOR3_CMASK 0x28d30
#define CB_COLOR4_CMASK 0x28d6c
#define CB_COLOR5_CMASK 0x28da8
#define CB_COLOR6_CMASK 0x28de4
#define CB_COLOR7_CMASK 0x28e20
 
#define CB_COLOR1_CMASK_SLICE 0x28cbc
#define CB_COLOR2_CMASK_SLICE 0x28cf8
#define CB_COLOR3_CMASK_SLICE 0x28d34
#define CB_COLOR4_CMASK_SLICE 0x28d70
#define CB_COLOR5_CMASK_SLICE 0x28dac
#define CB_COLOR6_CMASK_SLICE 0x28de8
#define CB_COLOR7_CMASK_SLICE 0x28e24
 
#define CB_COLOR1_FMASK 0x28cc0
#define CB_COLOR2_FMASK 0x28cfc
#define CB_COLOR3_FMASK 0x28d38
#define CB_COLOR4_FMASK 0x28d74
#define CB_COLOR5_FMASK 0x28db0
#define CB_COLOR6_FMASK 0x28dec
#define CB_COLOR7_FMASK 0x28e28
 
#define CB_COLOR1_FMASK_SLICE 0x28cc4
#define CB_COLOR2_FMASK_SLICE 0x28d00
#define CB_COLOR3_FMASK_SLICE 0x28d3c
#define CB_COLOR4_FMASK_SLICE 0x28d78
#define CB_COLOR5_FMASK_SLICE 0x28db4
#define CB_COLOR6_FMASK_SLICE 0x28df0
#define CB_COLOR7_FMASK_SLICE 0x28e2c
 
#define CB_COLOR1_CLEAR_WORD0 0x28cc8
#define CB_COLOR2_CLEAR_WORD0 0x28d04
#define CB_COLOR3_CLEAR_WORD0 0x28d40
#define CB_COLOR4_CLEAR_WORD0 0x28d7c
#define CB_COLOR5_CLEAR_WORD0 0x28db8
#define CB_COLOR6_CLEAR_WORD0 0x28df4
#define CB_COLOR7_CLEAR_WORD0 0x28e30
 
#define CB_COLOR1_CLEAR_WORD1 0x28ccc
#define CB_COLOR2_CLEAR_WORD1 0x28d08
#define CB_COLOR3_CLEAR_WORD1 0x28d44
#define CB_COLOR4_CLEAR_WORD1 0x28d80
#define CB_COLOR5_CLEAR_WORD1 0x28dbc
#define CB_COLOR6_CLEAR_WORD1 0x28df8
#define CB_COLOR7_CLEAR_WORD1 0x28e34
 
#define CB_COLOR1_CLEAR_WORD2 0x28cd0
#define CB_COLOR2_CLEAR_WORD2 0x28d0c
#define CB_COLOR3_CLEAR_WORD2 0x28d48
#define CB_COLOR4_CLEAR_WORD2 0x28d84
#define CB_COLOR5_CLEAR_WORD2 0x28dc0
#define CB_COLOR6_CLEAR_WORD2 0x28dfc
#define CB_COLOR7_CLEAR_WORD2 0x28e38
 
#define CB_COLOR1_CLEAR_WORD3 0x28cd4
#define CB_COLOR2_CLEAR_WORD3 0x28d10
#define CB_COLOR3_CLEAR_WORD3 0x28d4c
#define CB_COLOR4_CLEAR_WORD3 0x28d88
#define CB_COLOR5_CLEAR_WORD3 0x28dc4
#define CB_COLOR6_CLEAR_WORD3 0x28e00
#define CB_COLOR7_CLEAR_WORD3 0x28e3c
 
#define SQ_TEX_RESOURCE_WORD0_0 0x30000
#define SQ_TEX_RESOURCE_WORD1_0 0x30004
# define TEX_ARRAY_MODE(x) ((x) << 28)
#define SQ_TEX_RESOURCE_WORD2_0 0x30008
#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
#define SQ_TEX_RESOURCE_WORD4_0 0x30010
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
 
/* cayman 3D regs */
#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
#define CAYMAN_DB_EQAA 0x28804
#define CAYMAN_DB_DEPTH_INFO 0x2803C
#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
/* cayman packet3 addition */
#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
 
#endif
/drivers/video/drm/radeon/firmware/CEDAR_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/CEDAR_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/CEDAR_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/CYPRESS_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/CYPRESS_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/CYPRESS_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/JUNIPER_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/JUNIPER_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/JUNIPER_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/REDWOOD_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/REDWOOD_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/REDWOOD_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/ni_reg.h
0,0 → 1,86
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#ifndef __NI_REG_H__
#define __NI_REG_H__
 
/* northern islands - DCE5 */
 
#define NI_INPUT_GAMMA_CONTROL 0x6840
# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
# define NI_INPUT_GAMMA_USE_LUT 0
# define NI_INPUT_GAMMA_BYPASS 1
# define NI_INPUT_GAMMA_SRGB_24 2
# define NI_INPUT_GAMMA_XVYCC_222 3
# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
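
/*
 * Illustrative sketch (not part of the original header): DCE5 per-mode
 * fields are programmed by OR-ing the mode macros together, e.g.
 * bypassing input gamma for both the graphics and overlay planes.
 * These registers are per-crtc, so the crtc block offset is added.
 */
static void example_ni_input_gamma_bypass(struct radeon_device *rdev,
					  struct radeon_crtc *radeon_crtc)
{
	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
	       NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_BYPASS) |
	       NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_BYPASS));
}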
 
#define NI_PRESCALE_GRPH_CONTROL 0x68b4
# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
 
#define NI_PRESCALE_OVL_CONTROL 0x68c4
# define NI_OVL_PRESCALE_BYPASS (1 << 4)
 
#define NI_INPUT_CSC_CONTROL 0x68d4
# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
# define NI_INPUT_CSC_BYPASS 0
# define NI_INPUT_CSC_PROG_COEFF 1
# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
 
#define NI_OUTPUT_CSC_CONTROL 0x68f0
# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
# define NI_OUTPUT_CSC_BYPASS 0
# define NI_OUTPUT_CSC_TV_RGB 1
# define NI_OUTPUT_CSC_YCBCR_601 2
# define NI_OUTPUT_CSC_YCBCR_709 3
# define NI_OUTPUT_CSC_PROG_COEFF 4
# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
 
#define NI_DEGAMMA_CONTROL 0x6960
# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
# define NI_DEGAMMA_BYPASS 0
# define NI_DEGAMMA_SRGB_24 1
# define NI_DEGAMMA_XVYCC_222 2
# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
 
#define NI_GAMUT_REMAP_CONTROL 0x6964
# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
# define NI_GAMUT_REMAP_BYPASS 0
# define NI_GAMUT_REMAP_PROG_COEFF 1
# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
 
#define NI_REGAMMA_CONTROL 0x6a80
# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
# define NI_REGAMMA_BYPASS 0
# define NI_REGAMMA_SRGB_24 1
# define NI_REGAMMA_XVYCC_222 2
# define NI_REGAMMA_PROG_A 3
# define NI_REGAMMA_PROG_B 4
# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
 
#endif
/drivers/video/drm/radeon/pci.c
1,9 → 1,9
 
#include <ddk.h>
#include <linux/kernel.h>
#include <errno-base.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <pci.h>
#include <errno-base.h>
#include <syscall.h>
 
static LIST_HEAD(devices);
95,10 → 95,10
res = &dev->resource[pos];
 
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
l = PciRead32(dev->busnr, dev->devfn, reg);
PciWrite32(dev->busnr, dev->devfn, reg, ~0);
sz = PciRead32(dev->busnr, dev->devfn, reg);
PciWrite32(dev->busnr, dev->devfn, reg, l);
l = PciRead32(dev->bus, dev->devfn, reg);
PciWrite32(dev->bus, dev->devfn, reg, ~0);
sz = PciRead32(dev->bus, dev->devfn, reg);
PciWrite32(dev->bus, dev->devfn, reg, l);
 
if (!sz || sz == 0xffffffff)
continue;
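
/*
 * Illustrative sketch (editorial, not part of the diff above): the
 * classic BAR sizing decode the probe loop relies on. After writing
 * all-ones to a BAR and reading it back, only the writable address
 * bits come back set; the lowest set bit gives the decode size, and
 * the helper returns the extent (size - 1). This hypothetical helper
 * mirrors the role of the driver's own pci_size()/pci_size64().
 */
static u32_t example_bar_size(u32_t base, u32_t maxbase, u32_t mask)
{
	u32_t size = maxbase & mask;	/* keep only the address bits */

	if (!size)
		return 0;		/* BAR not implemented */

	/* lowest set bit gives the decode size; return the extent */
	size = (size & ~(size - 1)) - 1;

	/* base == maxbase is valid only if the BAR was already all 1s */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}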
134,10 → 134,10
{
u32_t szhi, lhi;
 
lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
lhi = PciRead32(dev->bus, dev->devfn, reg+4);
PciWrite32(dev->bus, dev->devfn, reg+4, ~0);
szhi = PciRead32(dev->bus, dev->devfn, reg+4);
PciWrite32(dev->bus, dev->devfn, reg+4, lhi);
sz64 = ((u64_t)szhi << 32) | raw_sz;
l64 = ((u64_t)lhi << 32) | l;
sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
162,9 → 162,9
else if (lhi)
{
/* 64-bit wide address, treat as disabled */
PciWrite32(dev->busnr, dev->devfn, reg,
PciWrite32(dev->bus, dev->devfn, reg,
l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
PciWrite32(dev->bus, dev->devfn, reg+4, 0);
res->start = 0;
res->end = sz;
}
177,10 → 177,10
dev->rom_base_reg = rom;
res = &dev->resource[PCI_ROM_RESOURCE];
 
l = PciRead32(dev->busnr, dev->devfn, rom);
PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE);
sz = PciRead32(dev->busnr, dev->devfn, rom);
PciWrite32(dev->busnr, dev->devfn, rom, l);
l = PciRead32(dev->bus, dev->devfn, rom);
PciWrite32(dev->bus, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE);
sz = PciRead32(dev->bus, dev->devfn, rom);
PciWrite32(dev->bus, dev->devfn, rom, l);
 
if (l == 0xffffffff)
l = 0;
205,10 → 205,10
{
u8_t irq;
 
irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN);
irq = PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN);
dev->pin = irq;
if (irq)
PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE);
PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE);
dev->irq = irq;
};
 
217,7 → 217,7
{
u32_t class;
 
class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
class = PciRead32(dev->bus, dev->devfn, PCI_CLASS_REVISION);
dev->revision = class & 0xff;
class >>= 8; /* upper 3 bytes */
dev->class = class;
236,8 → 236,8
goto bad;
pci_read_irq(dev);
pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID);
dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID);
dev->subsystem_vendor = PciRead16(dev->bus, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID);
dev->subsystem_device = PciRead16(dev->bus, dev->devfn, PCI_SUBSYSTEM_ID);
 
/*
* Do the ugly legacy mode stuff here rather than broken chip
249,7 → 249,7
{
u8_t progif;
 
progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG);
progif = PciRead8(dev->bus, dev->devfn,PCI_CLASS_PROG);
if ((progif & 1) == 0)
{
dev->resource[0].start = 0x1F0;
287,11 → 287,11
goto bad;
pci_read_irq(dev);
pci_read_bases(dev, 1, 0);
dev->subsystem_vendor = PciRead16(dev->busnr,
dev->subsystem_vendor = PciRead16(dev->bus,
dev->devfn,
PCI_CB_SUBSYSTEM_VENDOR_ID);
 
dev->subsystem_device = PciRead16(dev->busnr,
dev->subsystem_device = PciRead16(dev->bus,
dev->devfn,
PCI_CB_SUBSYSTEM_ID);
break;
354,7 → 354,7
if(unlikely(dev == NULL))
return NULL;
 
dev->pci_dev.busnr = bus;
dev->pci_dev.bus = bus;
dev->pci_dev.devfn = devfn;
dev->pci_dev.hdr_type = hdr & 0x7f;
dev->pci_dev.multifunction = !!(hdr & 0x80);
505,15 → 505,141
{
int pos;
 
pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type);
pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
if (pos)
pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap);
pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 
return pos;
}
 
 
#if 0
/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to be suspended
* @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
*
* Transition a device to a new power state, using the Power Management
* Capabilities in the device's config space.
*
* RETURN VALUE:
* -EINVAL if trying to enter a lower state than we're already in.
* 0 if we're already in the requested state.
* -EIO if device does not support PCI PM.
* 0 if we can successfully change the power state.
*/
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
int pm, need_restore = 0;
u16 pmcsr, pmc;
 
/* bound the state we're entering */
if (state > PCI_D3hot)
state = PCI_D3hot;
 
/*
* If the device or the parent bridge can't support PCI PM, ignore
* the request if we're doing anything besides putting it into D0
* (which would only happen on boot).
*/
if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
return 0;
 
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 
/* abort if the device doesn't support PM capabilities */
if (!pm)
return -EIO;
 
/* Validate current state:
 * We can enter D0 from any state, but otherwise we can only go
 * deeper to sleep if we're already in a low power state.
 */
if (state != PCI_D0 && dev->current_state > state) {
printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
__FUNCTION__, pci_name(dev), state, dev->current_state);
return -EINVAL;
} else if (dev->current_state == state)
return 0; /* we're already there */
 
 
pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
printk(KERN_DEBUG
"PCI: %s has unsupported PM cap regs version (%u)\n",
pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
return -EIO;
}
 
/* check if this device supports the desired state */
if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
return -EIO;
else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
return -EIO;
 
pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
 
/* If we're (effectively) in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and
* sets PowerState to 0.
*/
switch (dev->current_state) {
case PCI_D0:
case PCI_D1:
case PCI_D2:
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = 1;
/* Fall-through: force to D0 */
default:
pmcsr = 0;
break;
}
 
/* enter specified state */
pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);
 
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
msleep(pci_pm_d3_delay);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(200);
 
/*
* Give firmware a chance to be called, such as ACPI _PRx, _PSx
* Firmware method after native method ?
*/
if (platform_pci_set_power_state)
platform_pci_set_power_state(dev, state);
 
dev->current_state = state;
 
/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
* INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
* from D3hot to D0 _may_ perform an internal reset, thereby
* going to "D0 Uninitialized" rather than "D0 Initialized".
* For example, at least some versions of the 3c905B and the
* 3c556B exhibit this behaviour.
*
* At least some laptop BIOSen (e.g. the Thinkpad T21) leave
* devices in a D3hot state at boot. Consequently, we need to
* restore at least the BARs so that the device will be
* accessible to its driver.
*/
if (need_restore)
pci_restore_bars(dev);
 
return 0;
}
#endif
 
int pcibios_enable_resources(struct pci_dev *dev, int mask)
{
u16_t cmd, old_cmd;
520,7 → 646,7
int idx;
struct resource *r;
 
cmd = PciRead16(dev->busnr, dev->devfn, PCI_COMMAND);
cmd = PciRead16(dev->bus, dev->devfn, PCI_COMMAND);
old_cmd = cmd;
for (idx = 0; idx < PCI_NUM_RESOURCES; idx++)
{
548,7 → 674,7
if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd);
PciWrite16(dev->busnr, dev->devfn, PCI_COMMAND, cmd);
PciWrite16(dev->bus, dev->devfn, PCI_COMMAND, cmd);
}
return 0;
}
642,6 → 768,7
}
};
}
 
return NULL;
};
 
668,6 → 795,53
void *rom;
 
#if 0
/*
* IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
* memory map if the VGA enable bit of the Bridge Control register is
* set for embedded VGA.
*/
if (res->flags & IORESOURCE_ROM_SHADOW) {
/* primary video rom always starts here */
start = (u32_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
} else {
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
return (void *)(unsigned long)
pci_resource_start(pdev, PCI_ROM_RESOURCE);
} else {
/* assign the ROM an address if it doesn't have one */
//if (res->parent == NULL &&
// pci_assign_resource(pdev,PCI_ROM_RESOURCE))
// return NULL;
start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
if (*size == 0)
return NULL;
 
/* Enable ROM space decodes */
if (pci_enable_rom(pdev))
return NULL;
}
}
 
rom = ioremap(start, *size);
if (!rom) {
/* restore enable if ioremap fails */
if (!(res->flags & (IORESOURCE_ROM_ENABLE |
IORESOURCE_ROM_SHADOW |
IORESOURCE_ROM_COPY)))
pci_disable_rom(pdev);
return NULL;
}
 
/*
* Try to find the true size of the ROM since sometimes the PCI window
* size is much larger than the actual size of the ROM.
* True size is important if the ROM is going to be copied.
*/
*size = pci_get_rom_size(rom, *size);
 
#endif
 
unsigned char tmp[32];
699,3 → 873,4
return 0;
}
 
 
/drivers/video/drm/radeon/r100.c
26,15 → 26,18
* Jerome Glisse
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"
 
#include <linux/firmware.h>
 
63,6 → 66,34
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
 
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
 
/* Lock the graphics update lock */
/* update the scanout addresses */
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
/* Wait for update_pending to go high. */
while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
/* Return current update_pending status: */
return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
bool r100_gui_idle(struct radeon_device *rdev)
{
if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
return false;
else
return true;
}
 
/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
169,7 → 200,7
int r;
 
if (rdev->gart.table.ram.ptr) {
WARN(1, "R100 PCI GART already initialized.\n");
WARN(1, "R100 PCI GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
233,9 → 264,9
 
void r100_pci_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
r100_pci_gart_disable(rdev);
radeon_gart_table_ram_free(rdev);
radeon_gart_fini(rdev);
}
 
 
250,6 → 281,7
WREG32(R_000044_GEN_INT_STATUS, tmp);
}
 
#if 0
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
257,6 → 289,12
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
/* the interrupt works, but the status bit is permanently asserted */
if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
if (!rdev->irq.gui_idle_acked)
irq_mask |= RADEON_GUI_IDLE_STAT;
}
 
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
}
263,7 → 301,9
return irqs & irq_mask;
}
 
#endif
 
 
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
if (crtc == 0)
299,70 → 339,7
}
 
#if 0
/*
* Writeback
*/
int r100_wb_init(struct radeon_device *rdev)
{
int r;
 
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->wb.wb_obj);
if (r) {
dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
return r;
}
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr);
if (r) {
dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
radeon_bo_unreserve(rdev->wb.wb_obj);
return r;
}
r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
return r;
}
}
WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
WREG32(R_00070C_CP_RB_RPTR_ADDR,
S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
WREG32(R_000770_SCRATCH_UMSK, 0xff);
return 0;
}
 
void r100_wb_disable(struct radeon_device *rdev)
{
WREG32(R_000770_SCRATCH_UMSK, 0);
}
 
void r100_wb_fini(struct radeon_device *rdev)
{
int r;
 
r100_wb_disable(rdev);
if (rdev->wb.wb_obj) {
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0)) {
dev_err(rdev->dev, "(%d) can't finish WB\n", r);
return;
}
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
}
 
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
479,7 → 456,7
const char *fw_name = NULL;
int err;
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
err = IS_ERR(pdev);
582,26 → 559,6
if (r100_debugfs_cp_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n");
}
/* Reset CP */
tmp = RREG32(RADEON_CP_CSQ_STAT);
if ((tmp & (1 << 31))) {
DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
tmp = RREG32(RADEON_RBBM_SOFT_RESET);
mdelay(2);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
tmp = RREG32(RADEON_RBBM_SOFT_RESET);
mdelay(2);
tmp = RREG32(RADEON_CP_CSQ_STAT);
if ((tmp & (1 << 31))) {
DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
}
} else {
DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
}
 
if (!rdev->me_fw) {
r = r100_cp_init_microcode(rdev);
if (r) {
646,30 → 603,44
WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
REG_SET(RADEON_MAX_FETCH, max_fetch) |
RADEON_RB_NO_UPDATE);
REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
tmp |= RADEON_BUF_SWAP_32BIT;
#endif
WREG32(RADEON_CP_RB_CNTL, tmp);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
 
/* Set ring address */
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
/* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
 
if (rdev->wb.enabled)
WREG32(R_000770_SCRATCH_UMSK, 0xff);
else {
tmp |= RADEON_RB_NO_UPDATE;
WREG32(R_000770_SCRATCH_UMSK, 0);
}
 
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
/* protect against crazy HW on resume */
rdev->cp.wptr &= rdev->cp.ptr_mask;
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
REG_SET(RADEON_INDIRECT1_START, indirect1_start));
WREG32(0x718, 0);
WREG32(0x744, 0x00004D4D);
WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
radeon_ring_start(rdev);
r = radeon_ring_test(rdev);
678,6 → 649,7
return r;
}
rdev->cp.ready = true;
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
 
695,9 → 667,11
void r100_cp_disable(struct radeon_device *rdev)
{
/* Disable ring */
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(R_000770_SCRATCH_UMSK, 0);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
704,39 → 678,6
}
}
 
int r100_cp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
bool reinit_cp;
int i;
 
reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & (1 << 16))) {
DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
if (reinit_cp) {
return r100_cp_init(rdev, rdev->cp.ring_size);
}
return 0;
}
DRM_UDELAY(1);
}
tmp = RREG32(RADEON_RBBM_STATUS);
DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
return -1;
}
 
void r100_cp_commit(struct radeon_device *rdev)
{
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
890,14 → 831,12
if (waitreloc.reg != RADEON_WAIT_UNTIL ||
waitreloc.count != 0) {
DRM_ERROR("vline wait had illegal wait until segment\n");
r = -EINVAL;
return r;
return -EINVAL;
}
 
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
DRM_ERROR("vline wait had illegal wait until\n");
r = -EINVAL;
return r;
return -EINVAL;
}
 
/* jump over the NOP */
912,12 → 851,10
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = CP_PACKET0_GET_REG(header);
mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
r = -EINVAL;
goto out;
return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
939,15 → 876,13
break;
default:
DRM_ERROR("unknown crtc reloc\n");
r = -EINVAL;
goto out;
return -EINVAL;
}
ib[h_idx] = header;
ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
}
out:
mutex_unlock(&p->rdev->ddev->mode_config.mutex);
return r;
 
return 0;
}
 
/**
1097,6 → 1032,7
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
1109,6 → 1045,7
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_TXOFFSET_0:
1124,6 → 1061,7
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_OFFSET_T0_0:
case RADEON_PP_CUBIC_OFFSET_T0_1:
1141,6 → 1079,7
track->textures[0].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[0].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_OFFSET_T1_0:
case RADEON_PP_CUBIC_OFFSET_T1_1:
1158,6 → 1097,7
track->textures[1].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[1].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_OFFSET_T2_0:
case RADEON_PP_CUBIC_OFFSET_T2_1:
1175,9 → 1115,12
track->textures[2].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[2].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((idx_value >> 16) & 0x7FF);
track->cb_dirty = true;
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
1198,9 → 1141,11
ib[idx] = tmp;
 
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
break;
case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
track->zb_dirty = true;
break;
case RADEON_RB3D_CNTL:
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
1225,6 → 1170,8
return -EINVAL;
}
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
track->cb_dirty = true;
track->zb_dirty = true;
break;
case RADEON_RB3D_ZSTENCILCNTL:
switch (idx_value & 0xf) {
1242,6 → 1189,7
default:
break;
}
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
1258,6 → 1206,7
uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i));
track->tex_dirty = true;
}
break;
case RADEON_SE_VF_CNTL:
1272,6 → 1221,7
i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
track->tex_dirty = true;
break;
case RADEON_PP_TEX_PITCH_0:
case RADEON_PP_TEX_PITCH_1:
1278,6 → 1228,7
case RADEON_PP_TEX_PITCH_2:
i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
track->textures[i].pitch = idx_value + 32;
track->tex_dirty = true;
break;
case RADEON_PP_TXFILTER_0:
case RADEON_PP_TXFILTER_1:
1291,6 → 1242,7
tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false;
track->tex_dirty = true;
break;
case RADEON_PP_TXFORMAT_0:
case RADEON_PP_TXFORMAT_1:
1310,6 → 1262,7
case RADEON_TXFORMAT_RGB332:
case RADEON_TXFORMAT_Y8:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case RADEON_TXFORMAT_AI88:
case RADEON_TXFORMAT_ARGB1555:
1321,6 → 1274,7
case RADEON_TXFORMAT_LDUDV655:
case RADEON_TXFORMAT_DUDV88:
track->textures[i].cpp = 2;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case RADEON_TXFORMAT_ARGB8888:
case RADEON_TXFORMAT_RGBA8888:
1327,6 → 1281,7
case RADEON_TXFORMAT_SHADOW32:
case RADEON_TXFORMAT_LDUDUV8888:
track->textures[i].cpp = 4;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case RADEON_TXFORMAT_DXT1:
track->textures[i].cpp = 1;
1340,6 → 1295,7
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_FACES_0:
case RADEON_PP_CUBIC_FACES_1:
1350,6 → 1306,7
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
}
track->tex_dirty = true;
break;
default:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1484,6 → 1441,11
return r;
break;
/* triggers drawing using indices to vertex buffer */
case PACKET3_3D_CLEAR_HIZ:
case PACKET3_3D_CLEAR_ZMASK:
if (p->rdev->hyperz_filp != p->filp)
return -EINVAL;
break;
case PACKET3_NOP:
break;
default:
1652,83 → 1614,164
return -1;
}
 
void r100_gpu_init(struct radeon_device *rdev)
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
/* TODO: anythings to do here ? pipes ? */
r100_hdp_reset(rdev);
lockup->last_cp_rptr = cp->rptr;
	lockup->last_jiffies = 0; /* was: jiffies; not available in this port */
}
 
void r100_hdp_reset(struct radeon_device *rdev)
/**
 * r100_gpu_cp_is_lockup() - check if the CP is locked up by recording information
 * @rdev: radeon device structure
 * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
 * @cp: radeon_cp structure holding CP information
 *
 * We don't need to initialize the lockup tracking information, as either the
 * CP rptr will have moved to a different value or jiffies will have wrapped
 * around, either of which forces initialization of the tracking information.
 *
 * A possible false positive is if we get called after a while and last_cp_rptr
 * == the current CP rptr; even if it's unlikely, it might happen. To avoid
 * this, if the elapsed time since the last call is bigger than the timeout
 * (10 seconds in this version), we return false and update the tracking
 * information. Because of this, the caller must call r100_gpu_cp_is_lockup
 * several times within the timeout window for a lockup to be reported; the
 * fencing code should be cautious about that.
 *
 * The caller should write to the ring to force the CP to do something, so we
 * don't get a false positive when the CP has simply been given nothing to do.
 *
*
**/
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
uint32_t tmp;
unsigned long cjiffies, elapsed;
 
tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
tmp |= (7 << 28);
WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
(void)RREG32(RADEON_HOST_PATH_CNTL);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
WREG32(RADEON_HOST_PATH_CNTL, tmp);
(void)RREG32(RADEON_HOST_PATH_CNTL);
#if 0
cjiffies = jiffies;
if (!time_after(cjiffies, lockup->last_jiffies)) {
/* likely a wrap around */
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
if (cp->rptr != lockup->last_cp_rptr) {
/* CP is still working no lockup */
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
if (elapsed >= 10000) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
#endif
 
int r100_rb2d_reset(struct radeon_device *rdev)
/* give a chance to the GPU ... */
return false;
}
 
bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
uint32_t tmp;
int i;
u32 rbbm_status;
int r;
 
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & (1 << 26))) {
DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
return 0;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
return false;
}
DRM_UDELAY(1);
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
tmp = RREG32(RADEON_RBBM_STATUS);
DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
return -1;
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
 
int r100_gpu_reset(struct radeon_device *rdev)
void r100_bm_disable(struct radeon_device *rdev)
{
uint32_t status;
u32 tmp;
 
/* reset order likely matter */
status = RREG32(RADEON_RBBM_STATUS);
/* reset HDP */
r100_hdp_reset(rdev);
/* reset rb2d */
if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
r100_rb2d_reset(rdev);
/* disable bus mastering */
tmp = RREG32(R_000030_BUS_CNTL);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
mdelay(1);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
mdelay(1);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
tmp = PciRead16(rdev->pdev->bus, rdev->pdev->devfn, 0x4);
PciWrite16(rdev->pdev->bus, rdev->pdev->devfn, 0x4, tmp & 0xFFFB);
mdelay(1);
}
/* TODO: reset 3D engine */
 
int r100_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
u32 status, tmp;
int ret = 0;
 
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
WREG32(RADEON_CP_CSQ_CNTL, 0);
tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
/* save PCI state */
// pci_save_state(rdev->pdev);
/* disable bus mastering */
r100_bm_disable(rdev);
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
S_0000F0_SOFT_RESET_RE(1) |
S_0000F0_SOFT_RESET_PP(1) |
S_0000F0_SOFT_RESET_RB(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
status = RREG32(RADEON_RBBM_STATUS);
if (status & (1 << 16)) {
r100_cp_reset(rdev);
}
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
// pci_restore_state(rdev->pdev);
r100_enable_bm(rdev);
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
r100_mc_resume(rdev, &save);
return ret;
}
DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
return 0;
}
 
void r100_set_common_regs(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
bool force_dac2 = false;
u32 tmp;
 
/* set these so they don't interfere with anything */
WREG32(RADEON_OV0_SCALE_CNTL, 0);
1800,6 → 1843,12
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
WREG32(RADEON_DAC_CNTL2, dac2_cntl);
}
 
/* switch PM block to ACPI mode */
tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
tmp &= ~RADEON_PM_MODE_SEL;
WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
 
}
 
/*
1883,8 → 1932,8
u64 config_aper_size;
 
/* work out accessible VRAM */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
/* FIXME we don't use the second aperture yet when we could use it */
if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
1909,17 → 1958,15
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882 + along with lots of ubuntu ones
*/
if (rdev->mc.aper_size > config_aper_size)
config_aper_size = rdev->mc.aper_size;
 
if (config_aper_size > rdev->mc.real_vram_size)
rdev->mc.mc_vram_size = config_aper_size;
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
}
 
void r100_vga_set_state(struct radeon_device *rdev, bool state)
{
1927,10 → 1974,10
 
temp = RREG32(RADEON_CONFIG_CNTL);
if (state == false) {
temp &= ~(1<<8);
temp |= (1<<9);
temp &= ~RADEON_CFG_VGA_RAM_EN;
temp |= RADEON_CFG_VGA_IO_DIS;
} else {
temp &= ~(1<<9);
temp &= ~RADEON_CFG_VGA_IO_DIS;
}
WREG32(RADEON_CONFIG_CNTL, temp);
}
1945,8 → 1992,10
if (rdev->flags & RADEON_IS_IGP)
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
}
 
 
1955,12 → 2004,11
*/
void r100_pll_errata_after_index(struct radeon_device *rdev)
{
if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
return;
}
if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
(void)RREG32(RADEON_CLOCK_CNTL_DATA);
(void)RREG32(RADEON_CRTC_GEN_CNTL);
}
}
 
static void r100_pll_errata_after_data(struct radeon_device *rdev)
{
2197,12 → 2245,6
int surf_index = reg * 16;
int flags = 0;
 
/* r100/r200 divide by 16 */
if (rdev->family < CHIP_R300)
flags = pitch / 16;
else
flags = pitch / 8;
 
if (rdev->family <= CHIP_RS200) {
if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
== (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2226,7 → 2268,21
if (tiling_flags & RADEON_TILING_SWAP_32BIT)
flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
 
DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
if (ASIC_IS_RN50(rdev))
pitch /= 16;
}
 
/* r100/r200 divide by 16 */
if (rdev->family < CHIP_R300)
flags |= pitch / 16;
else
flags |= pitch / 8;
 
 
DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
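/* A worked example of the pitch encoding above, assuming a hypothetical
 * 1024-byte pitch:
 *   r100/r200 (family < CHIP_R300):  flags |= 1024 / 16  -> 0x40
 *   r300 and newer:                  flags |= 1024 / 8   -> 0x80
 *   RN50, byte-swapped, untiled:     pitch = 1024 / 16 = 64,
 *                                    then flags |= 64 / 16 -> 0x4
 */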
2246,53 → 2302,53
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
fixed20_12 memtcas_ff[8] = {
fixed_init(1),
fixed_init(2),
fixed_init(3),
fixed_init(0),
fixed_init_half(1),
fixed_init_half(2),
fixed_init(0),
dfixed_init(1),
dfixed_init(2),
dfixed_init(3),
dfixed_init(0),
dfixed_init_half(1),
dfixed_init_half(2),
dfixed_init(0),
};
fixed20_12 memtcas_rs480_ff[8] = {
fixed_init(0),
fixed_init(1),
fixed_init(2),
fixed_init(3),
fixed_init(0),
fixed_init_half(1),
fixed_init_half(2),
fixed_init_half(3),
dfixed_init(0),
dfixed_init(1),
dfixed_init(2),
dfixed_init(3),
dfixed_init(0),
dfixed_init_half(1),
dfixed_init_half(2),
dfixed_init_half(3),
};
fixed20_12 memtcas2_ff[8] = {
fixed_init(0),
fixed_init(1),
fixed_init(2),
fixed_init(3),
fixed_init(4),
fixed_init(5),
fixed_init(6),
fixed_init(7),
dfixed_init(0),
dfixed_init(1),
dfixed_init(2),
dfixed_init(3),
dfixed_init(4),
dfixed_init(5),
dfixed_init(6),
dfixed_init(7),
};
fixed20_12 memtrbs[8] = {
fixed_init(1),
fixed_init_half(1),
fixed_init(2),
fixed_init_half(2),
fixed_init(3),
fixed_init_half(3),
fixed_init(4),
fixed_init_half(4)
dfixed_init(1),
dfixed_init_half(1),
dfixed_init(2),
dfixed_init_half(2),
dfixed_init(3),
dfixed_init_half(3),
dfixed_init(4),
dfixed_init_half(4)
};
fixed20_12 memtrbs_r4xx[8] = {
fixed_init(4),
fixed_init(5),
fixed_init(6),
fixed_init(7),
fixed_init(8),
fixed_init(9),
fixed_init(10),
fixed_init(11)
dfixed_init(4),
dfixed_init(5),
dfixed_init(6),
dfixed_init(7),
dfixed_init(8),
dfixed_init(9),
dfixed_init(10),
dfixed_init(11)
};
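/* A sketch of the fixed20_12 helpers used throughout this function, assuming
 * the dfixed_* macros are the usual 20.12 fixed-point helpers (upper 20 bits
 * integer, lower 12 bits fraction):
 *   dfixed_init(3)      -> .full = 3 << 12          = 0x3000  (3.0)
 *   dfixed_init_half(1) -> .full = (1 << 12) + 2048 = 0x1800  (1.5)
 *   dfixed_trunc(x)     -> x.full >> 12             (integer part)
 * e.g. dividing dfixed_const(3) by dfixed_const(2) yields 0x1800, i.e. 1.5.
 */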
fixed20_12 min_mem_eff;
fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
2310,6 → 2366,8
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
 
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled) {
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2321,7 → 2379,7
}
}
 
min_mem_eff.full = rfixed_const_8(0);
min_mem_eff.full = dfixed_const_8(0);
/* get modes */
if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
2338,35 → 2396,32
/*
* determine if there is enough bw for the current mode
*/
mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
temp_ff.full = rfixed_const(100);
mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
sclk_ff = rdev->pm.sclk;
mclk_ff = rdev->pm.mclk;
 
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
temp_ff.full = rfixed_const(temp);
mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
temp_ff.full = dfixed_const(temp);
mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
 
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
temp_ff.full = rfixed_const(1000);
pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
pix_clk.full = rfixed_div(pix_clk, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes1);
peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
temp_ff.full = dfixed_const(1000);
pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
pix_clk.full = dfixed_div(pix_clk, temp_ff);
temp_ff.full = dfixed_const(pixel_bytes1);
peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
temp_ff.full = rfixed_const(1000);
pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes2);
peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
temp_ff.full = dfixed_const(1000);
pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
temp_ff.full = dfixed_const(pixel_bytes2);
peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
}
 
mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
2408,9 → 2463,9
mem_tras = ((temp >> 12) & 0xf) + 4;
}
/* convert to FF */
trcd_ff.full = rfixed_const(mem_trcd);
trp_ff.full = rfixed_const(mem_trp);
tras_ff.full = rfixed_const(mem_tras);
trcd_ff.full = dfixed_const(mem_trcd);
trp_ff.full = dfixed_const(mem_trp);
tras_ff.full = dfixed_const(mem_tras);
 
/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2428,7 → 2483,7
/* extra cas latency stored in bits 23-25 0-4 clocks */
data = (temp >> 23) & 0x7;
if (data < 5)
tcas_ff.full += rfixed_const(data);
tcas_ff.full += dfixed_const(data);
}
 
if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2465,72 → 2520,72
 
if (rdev->flags & RADEON_IS_AGP) {
fixed20_12 agpmode_ff;
agpmode_ff.full = rfixed_const(radeon_agpmode);
temp_ff.full = rfixed_const_666(16);
sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
agpmode_ff.full = dfixed_const(radeon_agpmode);
temp_ff.full = dfixed_const_666(16);
sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
}
/* TODO PCIE lanes may affect this - agpmode == 16?? */
 
if (ASIC_IS_R300(rdev)) {
sclk_delay_ff.full = rfixed_const(250);
sclk_delay_ff.full = dfixed_const(250);
} else {
if ((rdev->family == CHIP_RV100) ||
rdev->flags & RADEON_IS_IGP) {
if (rdev->mc.vram_is_ddr)
sclk_delay_ff.full = rfixed_const(41);
sclk_delay_ff.full = dfixed_const(41);
else
sclk_delay_ff.full = rfixed_const(33);
sclk_delay_ff.full = dfixed_const(33);
} else {
if (rdev->mc.vram_width == 128)
sclk_delay_ff.full = rfixed_const(57);
sclk_delay_ff.full = dfixed_const(57);
else
sclk_delay_ff.full = rfixed_const(41);
sclk_delay_ff.full = dfixed_const(41);
}
}
 
mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
 
if (rdev->mc.vram_is_ddr) {
if (rdev->mc.vram_width == 32) {
k1.full = rfixed_const(40);
k1.full = dfixed_const(40);
c = 3;
} else {
k1.full = rfixed_const(20);
k1.full = dfixed_const(20);
c = 1;
}
} else {
k1.full = rfixed_const(40);
k1.full = dfixed_const(40);
c = 3;
}
 
temp_ff.full = rfixed_const(2);
mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
temp_ff.full = rfixed_const(c);
mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
temp_ff.full = rfixed_const(4);
mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
temp_ff.full = dfixed_const(2);
mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
temp_ff.full = dfixed_const(c);
mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
temp_ff.full = dfixed_const(4);
mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
mc_latency_mclk.full += k1.full;
 
mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
 
/*
HW cursor time, assuming the worst case of a full-size colour cursor.
*/
temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
temp_ff.full += trcd_ff.full;
if (temp_ff.full < tras_ff.full)
temp_ff.full = tras_ff.full;
cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
 
temp_ff.full = rfixed_const(cur_size);
cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
temp_ff.full = dfixed_const(cur_size);
cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
/*
Find the total latency for the display data.
*/
disp_latency_overhead.full = rfixed_const(8);
disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
disp_latency_overhead.full = dfixed_const(8);
disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
 
2558,16 → 2613,16
/*
Find the drain rate of the display buffer.
*/
temp_ff.full = rfixed_const((16/pixel_bytes1));
disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
temp_ff.full = dfixed_const((16/pixel_bytes1));
disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
 
/*
Find the critical point of the display buffer.
*/
crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
crit_point_ff.full += rfixed_const_half(0);
crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
crit_point_ff.full += dfixed_const_half(0);
 
critical_point = rfixed_trunc(crit_point_ff);
critical_point = dfixed_trunc(crit_point_ff);
 
if (rdev->disp_priority == 2) {
critical_point = 0;
2623,7 → 2678,7
}
#endif
 
DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
/* (unsigned int)info->SavedReg->grph_buffer_cntl, */
(unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
}
2638,8 → 2693,8
/*
Find the drain rate of the display buffer.
*/
temp_ff.full = rfixed_const((16/pixel_bytes2));
disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
temp_ff.full = dfixed_const((16/pixel_bytes2));
disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
 
grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
2660,8 → 2715,8
critical_point2 = 0;
else {
temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
temp_ff.full = rfixed_const(temp);
temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
temp_ff.full = dfixed_const(temp);
temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
if (sclk_ff.full < temp_ff.full)
temp_ff.full = sclk_ff.full;
 
2669,15 → 2724,15
 
if (mode1) {
temp_ff.full = read_return_rate.full - disp_drain_rate.full;
time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
} else {
time_disp1_drop_priority.full = 0;
}
crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
crit_point_ff.full += rfixed_const_half(0);
crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
crit_point_ff.full += dfixed_const_half(0);
 
critical_point2 = rfixed_trunc(crit_point_ff);
critical_point2 = dfixed_trunc(crit_point_ff);
 
if (rdev->disp_priority == 2) {
critical_point2 = 0;
2719,13 → 2774,387
WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
}
 
DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
(unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
}
}
 
#if 0
static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
DRM_ERROR("pitch %d\n", t->pitch);
DRM_ERROR("use_pitch %d\n", t->use_pitch);
DRM_ERROR("width %d\n", t->width);
DRM_ERROR("width_11 %d\n", t->width_11);
DRM_ERROR("height %d\n", t->height);
DRM_ERROR("height_11 %d\n", t->height_11);
DRM_ERROR("num levels %d\n", t->num_levels);
DRM_ERROR("depth %d\n", t->txdepth);
DRM_ERROR("bpp %d\n", t->cpp);
DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
DRM_ERROR("compress format %d\n", t->compress_format);
}
 
static int r100_track_compress_size(int compress_format, int w, int h)
{
int block_width, block_height, block_bytes;
int wblocks, hblocks;
int min_wblocks;
int sz;
 
block_width = 4;
block_height = 4;
 
switch (compress_format) {
case R100_TRACK_COMP_DXT1:
block_bytes = 8;
min_wblocks = 4;
break;
default:
case R100_TRACK_COMP_DXT35:
block_bytes = 16;
min_wblocks = 2;
break;
}
 
hblocks = (h + block_height - 1) / block_height;
wblocks = (w + block_width - 1) / block_width;
if (wblocks < min_wblocks)
wblocks = min_wblocks;
sz = wblocks * hblocks * block_bytes;
return sz;
}
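/* A worked example of the block math above, for a hypothetical 13x7 DXT1
 * texture:
 *   wblocks = (13 + 3) / 4 = 4 (already >= min_wblocks of 4)
 *   hblocks = (7 + 3) / 4  = 2
 *   sz = 4 * 2 * 8 = 64 bytes
 * The same texture as DXT3/5 uses 16-byte blocks and min_wblocks of 2,
 * i.e. 4 * 2 * 16 = 128 bytes.
 */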
 
static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
struct radeon_bo *cube_robj;
unsigned long size;
unsigned compress_format = track->textures[idx].compress_format;
 
for (face = 0; face < 5; face++) {
cube_robj = track->textures[idx].cube_info[face].robj;
w = track->textures[idx].cube_info[face].width;
h = track->textures[idx].cube_info[face].height;
 
if (compress_format) {
size = r100_track_compress_size(compress_format, w, h);
} else
size = w * h;
size *= track->textures[idx].cpp;
 
size += track->textures[idx].cube_info[face].offset;
 
if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
}
return 0;
}
 
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
struct radeon_bo *robj;
unsigned long size;
unsigned u, i, w, h, d;
int ret;
 
for (u = 0; u < track->num_texture; u++) {
if (!track->textures[u].enabled)
continue;
if (track->textures[u].lookup_disable)
continue;
robj = track->textures[u].robj;
if (robj == NULL) {
DRM_ERROR("No texture bound to unit %u\n", u);
return -EINVAL;
}
size = 0;
for (i = 0; i <= track->textures[u].num_levels; i++) {
if (track->textures[u].use_pitch) {
if (rdev->family < CHIP_R300)
w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
else
w = track->textures[u].pitch / (1 << i);
} else {
w = track->textures[u].width;
if (rdev->family >= CHIP_RV515)
w |= track->textures[u].width_11;
w = w / (1 << i);
if (track->textures[u].roundup_w)
w = roundup_pow_of_two(w);
}
h = track->textures[u].height;
if (rdev->family >= CHIP_RV515)
h |= track->textures[u].height_11;
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
if (track->textures[u].tex_coord_type == 1) {
d = (1 << track->textures[u].txdepth) / (1 << i);
if (!d)
d = 1;
} else {
d = 1;
}
if (track->textures[u].compress_format) {
 
size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
/* compressed textures are block based */
} else
size += w * h * d;
}
size *= track->textures[u].cpp;
 
switch (track->textures[u].tex_coord_type) {
case 0:
case 1:
break;
case 2:
if (track->separate_cube) {
ret = r100_cs_track_cube(rdev, track, u);
if (ret)
return ret;
} else
size *= 6;
break;
default:
DRM_ERROR("Invalid texture coordinate type %u for unit "
"%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
if (size > radeon_bo_size(robj)) {
DRM_ERROR("Texture of unit %u needs %lu bytes but is "
"%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
}
return 0;
}
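/* A worked example of the mipmap loop above, assuming a hypothetical 16x16
 * 2D texture with num_levels = 1 and cpp = 4 (the loop is inclusive, so
 * levels 0..num_levels are summed):
 *   level 0: 16 * 16 * 1 = 256 texels
 *   level 1:  8 *  8 * 1 =  64 texels
 *   size = (256 + 64) * 4 = 1280 bytes, which must fit in the bound BO.
 */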
 
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i;
unsigned long size;
unsigned prim_walk;
unsigned nverts;
unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
 
if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
!track->blend_read_enable)
num_cb = 0;
 
for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy);
return -EINVAL;
}
}
track->cb_dirty = false;
 
if (track->zb_dirty && track->z_enabled) {
if (track->zb.robj == NULL) {
DRM_ERROR("[drm] No buffer for z buffer !\n");
return -EINVAL;
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer "
"(need %lu have %lu) !\n", size,
radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy);
return -EINVAL;
}
}
track->zb_dirty = false;
 
if (track->aa_dirty && track->aaresolve) {
if (track->aa.robj == NULL) {
DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
return -EINVAL;
}
/* I believe the format comes from colorbuffer0. */
size = track->aa.pitch * track->cb[0].cpp * track->maxy;
size += track->aa.offset;
if (size > radeon_bo_size(track->aa.robj)) {
DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->aa.robj));
DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
i, track->aa.pitch, track->cb[0].cpp,
track->aa.offset, track->maxy);
return -EINVAL;
}
}
track->aa_dirty = false;
 
prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
if (track->vap_vf_cntl & (1 << 14)) {
nverts = track->vap_alt_nverts;
} else {
nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
}
switch (prim_walk) {
case 1:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * track->max_indx * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
DRM_ERROR("Max indices %u\n", track->max_indx);
return -EINVAL;
}
}
break;
case 2:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * (nverts - 1) * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
return -EINVAL;
}
}
break;
case 3:
size = track->vtx_size * nverts;
if (size != track->immd_dwords) {
DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
track->immd_dwords, size);
DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
nverts, track->vtx_size);
return -EINVAL;
}
break;
default:
DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
prim_walk);
return -EINVAL;
}
 
if (track->tex_dirty) {
track->tex_dirty = false;
return r100_cs_track_texture_check(rdev, track);
}
return 0;
}
 
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i, face;
 
track->cb_dirty = true;
track->zb_dirty = true;
track->tex_dirty = true;
track->aa_dirty = true;
 
if (rdev->family < CHIP_R300) {
track->num_cb = 1;
if (rdev->family <= CHIP_RS200)
track->num_texture = 3;
else
track->num_texture = 6;
track->maxy = 2048;
track->separate_cube = 1;
} else {
track->num_cb = 4;
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
track->aaresolve = false;
track->aa.robj = NULL;
}
 
for (i = 0; i < track->num_cb; i++) {
track->cb[i].robj = NULL;
track->cb[i].pitch = 8192;
track->cb[i].cpp = 16;
track->cb[i].offset = 0;
}
track->z_enabled = true;
track->zb.robj = NULL;
track->zb.pitch = 8192;
track->zb.cpp = 4;
track->zb.offset = 0;
track->vtx_size = 0x7F;
track->immd_dwords = 0xFFFFFFFFUL;
track->num_arrays = 11;
track->max_indx = 0x00FFFFFFUL;
for (i = 0; i < track->num_arrays; i++) {
track->arrays[i].robj = NULL;
track->arrays[i].esize = 0x7F;
}
for (i = 0; i < track->num_texture; i++) {
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
track->textures[i].pitch = 16536;
track->textures[i].width = 16536;
track->textures[i].height = 16536;
track->textures[i].width_11 = 1 << 11;
track->textures[i].height_11 = 1 << 11;
track->textures[i].num_levels = 12;
if (rdev->family <= CHIP_RS200) {
track->textures[i].tex_coord_type = 0;
track->textures[i].txdepth = 0;
} else {
track->textures[i].txdepth = 16;
track->textures[i].tex_coord_type = 1;
}
track->textures[i].cpp = 64;
track->textures[i].robj = NULL;
/* CS IB emission code makes sure texture units are disabled */
track->textures[i].enabled = false;
track->textures[i].lookup_disable = false;
track->textures[i].roundup_w = true;
track->textures[i].roundup_h = true;
if (track->separate_cube)
for (face = 0; face < 5; face++) {
track->textures[i].cube_info[face].robj = NULL;
track->textures[i].cube_info[face].width = 16536;
track->textures[i].cube_info[face].height = 16536;
track->textures[i].cube_info[face].offset = 0;
}
}
}
#endif
 
int r100_ring_test(struct radeon_device *rdev)
{
uint32_t scratch;
2758,7 → 3187,7
if (i < rdev->usec_timeout) {
DRM_INFO("ring test succeeded in %d usecs\n", i);
} else {
DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
2766,6 → 3195,96
return r;
}
 
#if 0
 
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
radeon_ring_write(rdev, ib->gpu_addr);
radeon_ring_write(rdev, ib->length_dw);
}
 
int r100_ib_test(struct radeon_device *rdev)
{
struct radeon_ib *ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
int r;
 
r = radeon_scratch_get(rdev, &scratch);
if (r) {
DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ib_get(rdev, &ib);
if (r) {
return r;
}
ib->ptr[0] = PACKET0(scratch, 0);
ib->ptr[1] = 0xDEADBEEF;
ib->ptr[2] = PACKET2(0);
ib->ptr[3] = PACKET2(0);
ib->ptr[4] = PACKET2(0);
ib->ptr[5] = PACKET2(0);
ib->ptr[6] = PACKET2(0);
ib->ptr[7] = PACKET2(0);
ib->length_dw = 8;
r = radeon_ib_schedule(rdev, ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
}
r = radeon_fence_wait(ib->fence, false);
if (r) {
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
break;
}
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ib test succeeded in %u usecs\n", i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
}
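/* A sketch of what the test IB above contains, assuming the usual CP packet
 * encodings from r100d.h (PACKET0 carries the register address >> 2 plus a
 * count of n for n + 1 dwords; PACKET2 is the 0x80000000 NOP):
 *   ib->ptr[0] = PACKET0(scratch, 0)  - header: write 1 dword to scratch
 *   ib->ptr[1] = 0xDEADBEEF           - the value to write
 *   ib->ptr[2..7] = PACKET2(0)        - padding NOPs to an 8-dword IB
 * The test passes once the CP has fetched the IB and the scratch register
 * reads back 0xDEADBEEF.
 */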
 
void r100_ib_fini(struct radeon_device *rdev)
{
radeon_ib_pool_fini(rdev);
}
 
int r100_ib_init(struct radeon_device *rdev)
{
int r;
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
r = r100_ib_test(rdev);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
return 0;
}
#endif
 
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Shutdown CP we shouldn't need to do that but better be safe than
2909,8 → 3428,6
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
2925,12 → 3442,9
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
// r = r100_wb_init(rdev);
// if (r)
// dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
// r = r100_ib_init(rdev);
// if (r) {
// dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
2939,7 → 3453,30
return 0;
}
 
/*
* Due to how kexec works, it can leave the hw fully initialised when it
* boots the new kernel. However, doing our init sequence with the CP and
* WB stuff set up causes GPU hangs, on the RN50 at least. So at startup
* do some quick sanity checks and restore sane values to avoid this
* problem.
*/
void r100_restore_sanity(struct radeon_device *rdev)
{
u32 tmp;
 
tmp = RREG32(RADEON_CP_CSQ_CNTL);
if (tmp) {
WREG32(RADEON_CP_CSQ_CNTL, 0);
}
tmp = RREG32(RADEON_CP_RB_CNTL);
if (tmp) {
WREG32(RADEON_CP_RB_CNTL, 0);
}
tmp = RREG32(RADEON_SCRATCH_UMSK);
if (tmp) {
WREG32(RADEON_SCRATCH_UMSK, 0);
}
}
 
int r100_init(struct radeon_device *rdev)
{
2953,6 → 3490,8
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
/* sanity check some registers to avoid hangs like after kexec */
r100_restore_sanity(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
if (!radeon_get_bios(rdev)) {
2968,7 → 3507,7
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
2981,8 → 3520,6
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
3019,7 → 3556,6
// r100_ib_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
// radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
/drivers/video/drm/radeon/r100d.h
48,10 → 48,12
#define PACKET3_3D_DRAW_IMMD 0x29
#define PACKET3_3D_DRAW_INDX 0x2A
#define PACKET3_3D_LOAD_VBPNTR 0x2F
#define PACKET3_3D_CLEAR_ZMASK 0x32
#define PACKET3_INDX_BUFFER 0x33
#define PACKET3_3D_DRAW_VBUF_2 0x34
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
#define PACKET3_3D_CLEAR_HIZ 0x37
#define PACKET3_BITBLT_MULTI 0x9B
 
#define PACKET0(reg, n) (CP_PACKET0 | \
74,6 → 76,134
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
 
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
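/* The S_/G_/C_ triplets above follow the usual convention in these generated
 * headers: S_ shifts a value into its field, G_ extracts it, and C_ is the
 * AND-mask that clears it. A minimal read-modify-write sketch, assuming the
 * standard RREG32/WREG32 accessors:
 *
 *   u32 tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);
 *   tmp &= C_0000F0_SOFT_RESET_CP;      // clear bit 0
 *   tmp |= S_0000F0_SOFT_RESET_CP(1);   // request a CP soft reset
 *   WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);
 */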
#define R_000030_BUS_CNTL 0x000030
#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
#define C_000030_BUS_SUSPEND 0xFFBFFFFF
#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
#define C_000030_LAT_16X 0xFF7FFFFF
#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
#define C_000030_ENFRCWRDY 0xFDFFFFFF
#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
#define C_000030_SERR_EN 0xDFFFFFFF
#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
#define C_000030_BUS_READ_BURST 0xBFFFFFFF
#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
#define R_000040_GEN_INT_CNTL 0x000040
#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
421,7 → 551,7
#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31)
#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1)
#define C_000360_CUR2_LOCK 0x7FFFFFFF
#define R_0003C2_GENMO_WT 0x0003C0
#define R_0003C2_GENMO_WT 0x0003C2
#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE
710,5 → 840,41
#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
#define C_00000D_FORCE_RB 0xEFFFFFFF
 
/* PLL regs */
#define SCLK_CNTL 0xd
#define FORCE_HDP (1 << 17)
#define CLK_PWRMGT_CNTL 0x14
#define GLOBAL_PMAN_EN (1 << 10)
#define DISP_PM (1 << 20)
#define PLL_PWRMGT_CNTL 0x15
#define MPLL_TURNOFF (1 << 0)
#define SPLL_TURNOFF (1 << 1)
#define PPLL_TURNOFF (1 << 2)
#define P2PLL_TURNOFF (1 << 3)
#define TVPLL_TURNOFF (1 << 4)
#define MOBILE_SU (1 << 16)
#define SU_SCLK_USE_BCLK (1 << 17)
#define SCLK_CNTL2 0x1e
#define REDUCED_SPEED_SCLK_MODE (1 << 16)
#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17)
#define MCLK_MISC 0x1f
#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18)
#define SCLK_MORE_CNTL 0x35
#define REDUCED_SPEED_SCLK_EN (1 << 16)
#define IO_CG_VOLTAGE_DROP (1 << 17)
#define VOLTAGE_DELAY_SEL(x) ((x) << 20)
#define VOLTAGE_DROP_SYNC (1 << 19)
 
/* mmreg */
#define DISP_PWR_MAN 0xd08
#define DISP_D3_GRPH_RST (1 << 18)
#define DISP_D3_SUBPIC_RST (1 << 19)
#define DISP_D3_OV0_RST (1 << 20)
#define DISP_D1D2_GRPH_RST (1 << 21)
#define DISP_D1D2_SUBPIC_RST (1 << 22)
#define DISP_D1D2_OV0_RST (1 << 23)
#define DISP_DVO_ENABLE_RST (1 << 24)
#define TV_ENABLE_RST (1 << 25)
#define AUTO_PWRUP_EN (1 << 26)
 
#endif
/drivers/video/drm/radeon/r200.c
30,13 → 30,15
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
 
#include "r100d.h"
#include "r200_reg_safe.h"
 
//#include "r100_track.h"
#if 0
 
#if 0
#include "r100_track.h"
 
static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
{
int vtx_size, i;
81,6 → 83,51
return vtx_size;
}
 
int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence)
{
uint32_t size;
uint32_t cur_size;
int i, num_loops;
int r = 0;
 
/* radeon pitch is /64 */
size = num_pages << PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
r = radeon_ring_lock(rdev, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
cur_size = 0x1FFFFF;
}
size -= cur_size;
radeon_ring_write(rdev, PACKET0(0x720, 2));
radeon_ring_write(rdev, src_offset);
radeon_ring_write(rdev, dst_offset);
radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
radeon_ring_unlock_commit(rdev);
return r;
}
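/* A worked example of the chunking above, for a hypothetical 16 MiB copy
 * (num_pages << PAGE_SHIFT = 0x1000000 bytes): each DMA packet moves at
 * most 0x1FFFFF bytes, so
 *   num_loops = DIV_ROUND_UP(0x1000000, 0x1FFFFF) = 9
 * (eight full 0x1FFFFF chunks plus a final 8-byte remainder), and the ring
 * is locked for num_loops * 4 + 64 dwords to leave headroom for the idle
 * waits and the fence.
 */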
 
 
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
int vtx_size, i, tex_size;
139,6 → 186,7
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
151,6 → 199,7
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R200_PP_TXOFFSET_0:
169,6 → 218,7
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
case R200_PP_CUBIC_OFFSET_F1_0:
case R200_PP_CUBIC_OFFSET_F2_0:
212,9 → 262,12
track->textures[i].cube_info[face - 1].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
track->tex_dirty = true;
break;
case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((idx_value >> 16) & 0x7FF);
track->cb_dirty = true;
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
235,9 → 288,11
ib[idx] = tmp;
 
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
break;
case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
track->zb_dirty = true;
break;
case RADEON_RB3D_CNTL:
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
267,6 → 322,8
}
 
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
track->cb_dirty = true;
track->zb_dirty = true;
break;
case RADEON_RB3D_ZSTENCILCNTL:
switch (idx_value & 0xf) {
284,6 → 341,7
default:
break;
}
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
300,6 → 358,7
uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i));
track->tex_dirty = true;
}
break;
case RADEON_SE_VF_CNTL:
324,6 → 383,7
i = (reg - R200_PP_TXSIZE_0) / 32;
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
track->tex_dirty = true;
break;
case R200_PP_TXPITCH_0:
case R200_PP_TXPITCH_1:
333,6 → 393,7
case R200_PP_TXPITCH_5:
i = (reg - R200_PP_TXPITCH_0) / 32;
track->textures[i].pitch = idx_value + 32;
track->tex_dirty = true;
break;
case R200_PP_TXFILTER_0:
case R200_PP_TXFILTER_1:
349,6 → 410,7
tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false;
track->tex_dirty = true;
break;
case R200_PP_TXMULTI_CTL_0:
case R200_PP_TXMULTI_CTL_1:
370,6 → 432,8
/* 2D, 3D, CUBE */
switch (tmp) {
case 0:
case 3:
case 4:
case 5:
case 6:
case 7:
385,6 → 449,7
track->textures[i].tex_coord_type = 1;
break;
}
track->tex_dirty = true;
break;
case R200_PP_TXFORMAT_0:
case R200_PP_TXFORMAT_1:
400,11 → 465,14
track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
}
if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
track->textures[i].lookup_disable = true;
switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
case R200_TXFORMAT_I8:
case R200_TXFORMAT_RGB332:
case R200_TXFORMAT_Y8:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_AI88:
case R200_TXFORMAT_ARGB1555:
416,6 → 484,7
case R200_TXFORMAT_DVDU88:
case R200_TXFORMAT_AVYU4444:
track->textures[i].cpp = 2;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_ARGB8888:
case R200_TXFORMAT_RGBA8888:
423,6 → 492,7
case R200_TXFORMAT_BGR111110:
case R200_TXFORMAT_LDVDU8888:
track->textures[i].cpp = 4;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_DXT1:
track->textures[i].cpp = 1;
436,6 → 506,7
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
track->tex_dirty = true;
break;
case R200_PP_CUBIC_FACES_0:
case R200_PP_CUBIC_FACES_1:
449,6 → 520,7
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
}
track->tex_dirty = true;
break;
default:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
/drivers/video/drm/radeon/r300.c
26,10 → 26,13
* Jerome Glisse
*/
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
 
#include "r300d.h"
66,6 → 69,9
mb();
}
 
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
 
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
75,7 → 81,7
}
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24) |
0xc;
R300_PTE_WRITEABLE | R300_PTE_READABLE;
/* on x86 we want this to be CPU endian; on powerpc without HW
 * swappers, it'll get swapped on the way into VRAM - so no need
 * for cpu_to_le32 on VRAM tables */
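/* A worked example of the PTE encoding above, for a hypothetical system
 * address 0x123456700:
 *   lower_32_bits(addr) >> 8           = 0x00234567
 *   (upper_32_bits(addr) & 0xff) << 24 = 0x01000000
 *   | R300_PTE_WRITEABLE | R300_PTE_READABLE (0xc)
 *   entry                              = 0x0123456f
 * The low byte of the address is dropped, so pages are assumed to be at
 * least 256-byte aligned, and bits 2-3 carry the read/write flags.
 */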
88,7 → 94,7
int r;
 
if (rdev->gart.table.vram.robj) {
WARN(1, "RV370 PCIE GART already initialized.\n");
WARN(1, "RV370 PCIE GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
132,7 → 138,7
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
/* Clear error */
WREG32_PCIE(0x18, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_EN;
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
149,6 → 155,10
u32 tmp;
int r;
 
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
164,9 → 174,9
 
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rv370_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
radeon_gart_fini(rdev);
}
 
void r300_fence_ring_emit(struct radeon_device *rdev,
321,13 → 331,12
{
uint32_t gb_tile_config, tmp;
 
r100_hdp_reset(rdev);
/* FIXME: rv380 one pipes ? */
if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
(rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
/* r300,r350 */
rdev->num_gb_pipes = 2;
} else {
/* rv350,rv370,rv380 */
/* rv350,rv370,rv380,r300 AD, r350 AH */
rdev->num_gb_pipes = 1;
}
rdev->num_z_pipes = 1;
373,89 → 382,86
rdev->num_gb_pipes, rdev->num_z_pipes);
}
 
int r300_ga_reset(struct radeon_device *rdev)
bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
uint32_t tmp;
bool reinit_cp;
int i;
u32 rbbm_status;
int r;
 
reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
for (i = 0; i < rdev->usec_timeout; i++) {
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RADEON_RBBM_STATUS);
if (tmp & ((1 << 20) | (1 << 26))) {
DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
/* GA still busy soft reset it */
WREG32(0x429C, 0x200);
WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
WREG32(R300_RE_SCISSORS_TL, 0);
WREG32(R300_RE_SCISSORS_BR, 0);
WREG32(0x24AC, 0);
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
return false;
}
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
break;
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
if (reinit_cp) {
return r100_cp_init(rdev, rdev->cp.ring_size);
}
return 0;
}
DRM_UDELAY(1);
}
tmp = RREG32(RADEON_RBBM_STATUS);
DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
return -1;
}
 
int r300_gpu_reset(struct radeon_device *rdev)
int r300_asic_reset(struct radeon_device *rdev)
{
uint32_t status;
struct r100_mc_save save;
u32 status, tmp;
int ret = 0;
 
/* reset order likely matter */
status = RREG32(RADEON_RBBM_STATUS);
/* reset HDP */
r100_hdp_reset(rdev);
/* reset rb2d */
if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
r100_rb2d_reset(rdev);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
/* reset GA */
if (status & ((1 << 20) | (1 << 26))) {
r300_ga_reset(rdev);
}
/* reset CP */
status = RREG32(RADEON_RBBM_STATUS);
if (status & (1 << 16)) {
r100_cp_reset(rdev);
}
r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
WREG32(RADEON_CP_CSQ_CNTL, 0);
tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
/* save PCI state */
// pci_save_state(rdev->pdev);
/* disable bus mastering */
r100_bm_disable(rdev);
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* resetting the CP seems to be problematic; sometimes it ends up
 * hard locking the computer, but it's necessary for a successful
 * reset. More testing & playing is needed on R3XX/R4XX to find a
 * reliable solution (if any).
*/
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
// pci_restore_state(rdev->pdev);
r100_enable_bm(rdev);
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
r100_mc_resume(rdev, &save);
return ret;
}
DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
return 0;
}
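/* A worked example of the soft-reset writes above, using the S_0000F0_*
 * encodings from r300d.h (CP = bit 0, VAP = bit 2, GA = bit 13):
 *   S_0000F0_SOFT_RESET_VAP(1) | S_0000F0_SOFT_RESET_GA(1) = 0x2004
 *   S_0000F0_SOFT_RESET_CP(1)                              = 0x0001
 * Each write is followed by a dummy read and an mdelay so the reset lines
 * settle before RBBM_STATUS is sampled again.
 */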
 
 
/*
* r300,r350,rv350,rv380 VRAM info
*/
479,8 → 485,10
if (rdev->flags & RADEON_IS_IGP)
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
}
 
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
554,10 → 562,7
 
/* FIXME wait for idle */
 
if (rdev->family < CHIP_R600)
link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
else
link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
 
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
case RADEON_PCIE_LC_LINK_WIDTH_X0:
665,6 → 670,7
}
track->cb[i].robj = reloc->robj;
track->cb[i].offset = idx_value;
track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
677,6 → 683,7
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_TX_OFFSET_0:
715,6 → 722,7
tmp |= tile_flags;
ib[idx] = tmp;
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
/* Tracked registers */
case 0x2084:
729,6 → 737,12
/* VAP_VF_MAX_VTX_INDX */
track->max_indx = idx_value & 0x00FFFFFFUL;
break;
case 0x2088:
/* VAP_ALT_NUM_VERTICES - only valid on r500 */
if (p->rdev->family < CHIP_RV515)
goto fail;
track->vap_alt_nverts = idx_value & 0xFFFFFF;
break;
case 0x43E4:
/* SC_SCISSOR1 */
track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
735,10 → 749,18
if (p->rdev->family < CHIP_RV515) {
track->maxy -= 1440;
}
track->cb_dirty = true;
track->zb_dirty = true;
break;
case 0x4E00:
/* RB3D_CCTL */
if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
p->rdev->cmask_filp != p->filp) {
DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
return -EINVAL;
}
track->num_cb = ((idx_value >> 5) & 0x3) + 1;
track->cb_dirty = true;
break;
case 0x4E38:
case 0x4E3C:
766,7 → 788,6
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
 
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
781,6 → 802,13
case 15:
track->cb[i].cpp = 2;
break;
case 5:
if (p->rdev->family < CHIP_RV515) {
DRM_ERROR("Invalid color buffer format (%d)!\n",
((idx_value >> 21) & 0xF));
return -EINVAL;
}
/* Pass through. */
case 6:
track->cb[i].cpp = 4;
break;
795,6 → 823,7
((idx_value >> 21) & 0xF));
return -EINVAL;
}
track->cb_dirty = true;
break;
case 0x4F00:
/* ZB_CNTL */
803,6 → 832,7
} else {
track->z_enabled = false;
}
track->zb_dirty = true;
break;
case 0x4F10:
/* ZB_FORMAT */
819,6 → 849,7
(idx_value & 0xF));
return -EINVAL;
}
track->zb_dirty = true;
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
842,8 → 873,10
ib[idx] = tmp;
 
track->zb.pitch = idx_value & 0x3FFC;
track->zb_dirty = true;
break;
case 0x4104:
/* TX_ENABLE */
for (i = 0; i < 16; i++) {
bool enabled;
 
850,6 → 883,7
enabled = !!(idx_value & (1 << i));
track->textures[i].enabled = enabled;
}
track->tex_dirty = true;
break;
case 0x44C0:
case 0x44C4:
876,8 → 910,10
case R300_TX_FORMAT_Y4X4:
case R300_TX_FORMAT_Z3Y3X2:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_X16:
case R300_TX_FORMAT_FL_I16:
case R300_TX_FORMAT_Y8X8:
case R300_TX_FORMAT_Z5Y6X5:
case R300_TX_FORMAT_Z6Y5X5:
887,8 → 923,10
case R300_TX_FORMAT_B8G8_B8G8:
case R300_TX_FORMAT_G8R8_G8B8:
track->textures[i].cpp = 2;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_Y16X16:
case R300_TX_FORMAT_FL_I16A16:
case R300_TX_FORMAT_Z11Y11X10:
case R300_TX_FORMAT_Z10Y11X11:
case R300_TX_FORMAT_W8Z8Y8X8:
897,14 → 935,17
case R300_TX_FORMAT_FL_I32:
case 0x1e:
track->textures[i].cpp = 4;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_W16Z16Y16X16:
case R300_TX_FORMAT_FL_R16G16B16A16:
case R300_TX_FORMAT_FL_I32A32:
track->textures[i].cpp = 8;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_FL_R32G32B32A32:
track->textures[i].cpp = 16;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_DXT1:
track->textures[i].cpp = 1;
927,8 → 968,8
DRM_ERROR("Invalid texture format %u\n",
(idx_value & 0x1F));
return -EINVAL;
break;
}
track->tex_dirty = true;
break;
case 0x4400:
case 0x4404:
956,6 → 997,7
if (tmp == 2 || tmp == 4 || tmp == 6) {
track->textures[i].roundup_h = false;
}
track->tex_dirty = true;
break;
case 0x4500:
case 0x4504:
993,6 → 1035,7
DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
return -EINVAL;
}
track->tex_dirty = true;
break;
case 0x4480:
case 0x4484:
1022,6 → 1065,7
track->textures[i].use_pitch = !!tmp;
tmp = (idx_value >> 22) & 0xF;
track->textures[i].txdepth = tmp;
track->tex_dirty = true;
break;
case R300_ZB_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
1036,15 → 1080,71
case 0x4e0c:
/* RB3D_COLOR_CHANNEL_MASK */
track->color_channel_mask = idx_value;
track->cb_dirty = true;
break;
case 0x4d1c:
case 0x43a4:
/* SC_HYPERZ_EN */
/* r300c emits this register - we need to disable hyperz for it
* without complaining */
if (p->rdev->hyperz_filp != p->filp) {
if (idx_value & 0x1)
ib[idx] = idx_value & ~1;
}
break;
case 0x4f1c:
/* ZB_BW_CNTL */
track->fastfill = !!(idx_value & (1 << 2));
track->zb_cb_clear = !!(idx_value & (1 << 5));
track->cb_dirty = true;
track->zb_dirty = true;
if (p->rdev->hyperz_filp != p->filp) {
if (idx_value & (R300_HIZ_ENABLE |
R300_RD_COMP_ENABLE |
R300_WR_COMP_ENABLE |
R300_FAST_FILL_ENABLE))
goto fail;
}
break;
case 0x4e04:
/* RB3D_BLENDCNTL */
track->blend_read_enable = !!(idx_value & (1 << 2));
track->cb_dirty = true;
break;
case R300_RB3D_AARESOLVE_OFFSET:
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
track->aa.robj = reloc->robj;
track->aa.offset = idx_value;
track->aa_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_RB3D_AARESOLVE_PITCH:
track->aa.pitch = idx_value & 0x3FFE;
track->aa_dirty = true;
break;
case R300_RB3D_AARESOLVE_CTL:
track->aaresolve = idx_value & 0x1;
track->aa_dirty = true;
break;
case 0x4f30: /* ZB_MASK_OFFSET */
case 0x4f34: /* ZB_ZMASK_PITCH */
case 0x4f44: /* ZB_HIZ_OFFSET */
case 0x4f54: /* ZB_HIZ_PITCH */
if (idx_value && (p->rdev->hyperz_filp != p->filp))
goto fail;
break;
case 0x4028:
if (idx_value && (p->rdev->hyperz_filp != p->filp))
goto fail;
/* GB_Z_PEQ_CONFIG */
if (p->rdev->family >= CHIP_RV350)
break;
goto fail;
break;
case 0x4be8:
/* valid register only on RV530 */
if (p->rdev->family == CHIP_RV530)
1051,11 → 1151,13
break;
/* fallthrough do not move */
default:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
reg, idx);
return -EINVAL;
goto fail;
}
return 0;
fail:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
reg, idx, idx_value);
return -EINVAL;
}
 
static int r300_packet3_check(struct radeon_cs_parser *p,
1148,6 → 1250,15
return r;
}
break;
case PACKET3_3D_CLEAR_HIZ:
case PACKET3_3D_CLEAR_ZMASK:
if (p->rdev->hyperz_filp != p->filp)
return -EINVAL;
break;
case PACKET3_3D_CLEAR_CMASK:
if (p->rdev->cmask_filp != p->filp)
return -EINVAL;
break;
case PACKET3_NOP:
break;
default:
1164,6 → 1275,8
int r;
 
track = kzalloc(sizeof(*track), GFP_KERNEL);
if (track == NULL)
return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
1281,6 → 1394,8
if (r)
return r;
}
 
 
/* Enable IRQ */
// r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
1287,12 → 1402,9
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
// r = r100_wb_init(rdev);
// if (r)
// dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
// r = r100_ib_init(rdev);
// if (r) {
// dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
1316,6 → 1428,8
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
/* restore some registers to sane defaults */
r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
1330,7 → 1444,7
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
1343,8 → 1457,6
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
1381,14 → 1493,10
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
// r100_cp_fini(rdev);
// r100_wb_fini(rdev);
// r100_ib_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
// radeon_agp_fini(rdev);
rdev->accel_working = false;
}
return 0;
/drivers/video/drm/radeon/r300_reg.h
608,7 → 608,7
* My guess is that there are two bits for each zbias primitive
* (FILL, LINE, POINT).
* One to enable depth test and one for depth write.
* Yet this doesnt explain why depth writes work ...
* Yet this doesn't explain why depth writes work ...
*/
#define R300_RE_OCCLUSION_CNTL 0x42B4
# define R300_OCCLUSION_ON (1<<1)
817,7 → 817,7
# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
 
/* NOTE: NEAREST doesnt seem to exist.
/* NOTE: NEAREST doesn't seem to exist.
* I'm not setting MAG_FILTER_MASK and (3 << 11) on for all
* anisotropy modes because that would void selected mag filter
*/
1371,6 → 1371,8
#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
 
#define R300_RB3D_AARESOLVE_OFFSET 0x4E80
#define R300_RB3D_AARESOLVE_PITCH 0x4E84
#define R300_RB3D_AARESOLVE_CTL 0x4E88
/* gap */
 
/drivers/video/drm/radeon/r300d.h
48,10 → 48,13
#define PACKET3_3D_DRAW_IMMD 0x29
#define PACKET3_3D_DRAW_INDX 0x2A
#define PACKET3_3D_LOAD_VBPNTR 0x2F
#define PACKET3_3D_CLEAR_ZMASK 0x32
#define PACKET3_INDX_BUFFER 0x33
#define PACKET3_3D_DRAW_VBUF_2 0x34
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
#define PACKET3_3D_CLEAR_HIZ 0x37
#define PACKET3_3D_CLEAR_CMASK 0x38
#define PACKET3_BITBLT_MULTI 0x9B
 
#define PACKET0(reg, n) (CP_PACKET0 | \
209,8 → 212,53
#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
 
 
#define R_00000D_SCLK_CNTL 0x00000D
#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
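The S_/G_/C_ macro triples above follow the driver's usual field-access convention: S_ shifts a value into its field, G_ extracts it, and C_ is the AND mask that clears it. A minimal read-modify-write sketch using the RBBM_SOFT_RESET helpers (the function name is hypothetical; RREG32/WREG32 are the driver's usual register accessors):

/* Sketch: assert the CP bit of RBBM_SOFT_RESET via the S_/G_/C_ helpers. */
static void rbbm_assert_cp_reset_sketch(struct radeon_device *rdev)
{
	u32 tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);

	tmp &= C_0000F0_SOFT_RESET_CP;		/* clear the CP field */
	tmp |= S_0000F0_SOFT_RESET_CP(1);	/* assert CP soft reset */
	WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);
	/* G_ pulls the field back out of a register value */
	WARN_ON(G_0000F0_SOFT_RESET_CP(tmp) != 1);
}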
/drivers/video/drm/radeon/r420.c
26,9 → 26,11
* Jerome Glisse
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
55,8 → 57,14
"programming pipes. Bad things might happen.\n");
}
/* get max number of pipes */
gb_pipe_select = RREG32(0x402C);
gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
 
/* SE chips have 1 pipe */
if ((rdev->pdev->device == 0x5e4c) ||
(rdev->pdev->device == 0x5e4f))
num_pipes = 1;
 
rdev->num_gb_pipes = num_pipes;
tmp = 0;
switch (num_pipes) {
202,24 → 210,14
}
r420_pipes_init(rdev);
/* Enable IRQ */
// r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r420_cp_errata_init(rdev);
// r = r100_wb_init(rdev);
// if (r) {
// dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
// }
// r = r100_ib_init(rdev);
// if (r) {
// dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
// return r;
// }
return 0;
}
 
233,7 → 231,7
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
262,6 → 260,8
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA, need to use VGA request */
/* restore some registers to sane defaults */
r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
279,7 → 279,7
}
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
291,8 → 291,6
 
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
304,14 → 302,7
r300_mc_init(rdev);
r420_debugfs(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
// if (r) {
// return r;
// }
// r = radeon_irq_kms_init(rdev);
// if (r) {
// return r;
// }
 
/* Memory manager */
r = radeon_bo_init(rdev);
if (r) {
336,14 → 327,10
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
// r100_cp_fini(rdev);
// r100_wb_fini(rdev);
// r100_ib_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
// radeon_agp_fini(rdev);
rdev->accel_working = false;
}
return 0;
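r420_pipes_init() above derives the pipe count from bits 13:12 of R400_GB_PIPE_SELECT, then overrides it for the single-pipe SE parts: a raw value of 0x3000 decodes as ((0x3000 >> 12) & 3) + 1 = 4 pipes. A standalone sketch of that decode (the helper name is hypothetical):

/* Hypothetical helper mirroring the pipe-count decode in r420_pipes_init(). */
static unsigned r420_decode_num_pipes(u32 gb_pipe_select, u16 pci_device)
{
	unsigned num_pipes = ((gb_pipe_select >> 12) & 3) + 1;

	/* SE chips have 1 pipe whatever the register reports */
	if (pci_device == 0x5e4c || pci_device == 0x5e4f)
		num_pipes = 1;
	return num_pipes;
}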
/drivers/video/drm/radeon/r500_reg.h
347,12 → 347,16
 
#define AVIVO_D1CRTC_CONTROL 0x6080
# define AVIVO_CRTC_EN (1 << 0)
# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
 
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
 
/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
384,6 → 388,11
# define AVIVO_D1GRPH_TILED (1 << 20)
# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
 
# define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
# define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
# define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
# define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
 
/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
* block and vice versa. This applies to GRPH, CUR, etc.
*/
402,8 → 411,10
#define AVIVO_D1GRPH_X_END 0x6134
#define AVIVO_D1GRPH_Y_END 0x6138
#define AVIVO_D1GRPH_UPDATE 0x6144
# define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2)
# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
# define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
 
#define AVIVO_D1CUR_CONTROL 0x6400
# define AVIVO_D1CURSOR_EN (1 << 0)
488,6 → 499,7
#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0
#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
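The AVIVO_D1GRPH_UPDATE lock and pending bits added above are the double-buffering controls a page flip keys off: lock the GRPH registers, write the new scanout address, release the lock, then poll SURFACE_UPDATE_PENDING until the hardware latches the new surface at vblank. A minimal sketch under those assumptions (AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS is defined elsewhere in this header; error handling and vblank interlocks omitted):

/* Sketch of a D1 surface flip using the lock/pending bits above. */
static void avivo_d1_flip_sketch(struct radeon_device *rdev, u32 new_addr)
{
	/* freeze the double-buffered GRPH registers */
	WREG32_P(AVIVO_D1GRPH_UPDATE, AVIVO_D1GRPH_UPDATE_LOCK,
		 ~AVIVO_D1GRPH_UPDATE_LOCK);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS, new_addr);
	/* release the lock; the flip latches on the next vblank */
	WREG32_P(AVIVO_D1GRPH_UPDATE, 0, ~AVIVO_D1GRPH_UPDATE_LOCK);
	while (RREG32(AVIVO_D1GRPH_UPDATE) &
	       AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
		; /* busy-wait until the new surface is being scanned out */
}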
 
/drivers/video/drm/radeon/r520.c
27,6 → 27,7
*/
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r520d.h"
 
52,7 → 53,6
{
unsigned pipe_select_current, gb_pipe_select, tmp;
 
r100_hdp_reset(rdev);
rv515_vga_render_disable(rdev);
/*
* DST_PIPE_CONFIG 0x170C
79,8 → 79,8
WREG32(0x4128, 0xFF);
}
r420_pipes_init(rdev);
gb_pipe_select = RREG32(0x402C);
tmp = RREG32(0x170C);
gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
tmp = RREG32(R300_DST_PIPE_CONFIG);
pipe_select_current = (tmp >> 2) & 3;
tmp = (1 << pipe_select_current) |
(((gb_pipe_select >> 8) & 0xF) << 4);
121,19 → 121,14
 
void r520_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
 
r520_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
radeon_vram_location(rdev, &rdev->mc, 0);
rdev->mc.gtt_base_align = 0;
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
radeon_update_bandwidth_info(rdev);
}
 
void r520_mc_program(struct radeon_device *rdev)
187,22 → 182,13
return r;
}
/* Enable IRQ */
// rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
// r = r100_wb_init(rdev);
// if (r)
// dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
// r = r100_ib_init(rdev);
// if (r) {
// dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
// return r;
// }
return 0;
}
 
212,12 → 198,12
{
int r;
 
ENTER();
 
/* Initialize scratch registers */
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
/* restore some registers to sane defaults */
r100_restore_sanity(rdev);
/* TODO: disable VGA, need to use VGA request */
/* BIOS*/
if (!radeon_get_bios(rdev)) {
233,7 → 219,7
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
249,8 → 235,6
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
262,12 → 246,6
r520_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
// if (r)
// return r;
// r = radeon_irq_kms_init(rdev);
// if (r)
// return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
281,15 → 259,8
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
// r100_cp_fini(rdev);
// r100_wb_fini(rdev);
// r100_ib_fini(rdev);
rv370_pcie_gart_fini(rdev);
// radeon_agp_fini(rdev);
rdev->accel_working = false;
}
 
LEAVE();
 
return 0;
}
/drivers/video/drm/radeon/r600.c
25,11 → 25,13
* Alex Deucher
* Jerome Glisse
*/
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
41,6 → 43,10
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
 
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
65,6 → 71,25
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
 
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
72,7 → 97,35
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
ASIC_T_SHIFT;
int actual_temp = temp & 0xff;
 
if (temp & 0x100)
actual_temp -= 256;
 
return actual_temp * 1000;
}
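ASIC_T is a 9-bit two's-complement field, so bit 8 is the sign: a raw reading of 0x1F0, for instance, decodes as 0xF0 - 256 = -16 degrees C, returned as -16000 millidegrees. The arithmetic in isolation:

/* Standalone restatement of the sign extension in rv6xx_get_temp(). */
static int rv6xx_decode_temp(u32 raw)
{
	int actual_temp = raw & 0xff;

	if (raw & 0x100)		/* bit 8 is the sign bit */
		actual_temp -= 256;
	return actual_temp * 1000;	/* millidegrees */
}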
 
 
 
 
 
 
bool r600_gui_idle(struct radeon_device *rdev)
{
if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
return false;
else
return true;
}
 
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
358,6 → 411,19
u32 tmp;
 
/* flush hdp cache so updates hit vram */
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
!(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
u32 tmp;
 
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
* This seems to cause problems on some AGP cards. Just use the old
* method for them.
*/
WREG32(HDP_DEBUG1, 0);
tmp = readl((void __iomem *)ptr);
} else
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 
WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
383,7 → 449,7
int r;
 
if (rdev->gart.table.vram.robj) {
WARN(1, "R600 PCIE GART already initialized.\n");
WARN(1, "R600 PCIE GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
490,9 → 556,9
 
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
r600_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
radeon_gart_fini(rdev);
}
 
void r600_agp_enable(struct radeon_device *rdev)
591,7 → 657,7
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
631,7 → 697,7
* Note: GTT start, end, size should be initialized before calling this
* function on AGP platforms.
*/
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
 
665,9 → 731,12
mc->vram_end, mc->real_vram_size >> 20);
} else {
u64 base = 0;
if (rdev->flags & RADEON_IS_IGP)
base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
if (rdev->flags & RADEON_IS_IGP) {
base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
}
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, mc);
}
}
674,7 → 743,6
 
int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
int chansize, numchan;
 
706,26 → 774,19
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
if (rdev->flags & RADEON_IS_IGP)
 
if (rdev->flags & RADEON_IS_IGP) {
rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
}
radeon_update_bandwidth_info(rdev);
return 0;
}
 
752,9 → 813,11
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
u32 srbm_reset = 0;
u32 tmp;
 
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
return 0;
 
dev_info(rdev->dev, "GPU softreset \n");
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
767,7 → 830,7
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
786,57 → 849,19
S_008020_SOFT_RESET_VGT(1);
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
RREG32(R_008020_GRBM_SOFT_RESET);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
tmp = S_008020_SOFT_RESET_CP(1);
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
RREG32(R_008020_GRBM_SOFT_RESET);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
/* Reset other GPU blocks if necessary */
if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_IH(1);
if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
udelay(50);
WREG32(R_000E60_SRBM_SOFT_RESET, 0);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
udelay(50);
WREG32(R_000E60_SRBM_SOFT_RESET, 0);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
mdelay(1);
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
843,16 → 868,44
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
/* After reset we need to reinit the asic as the GPU often ends up in an
* incoherent state.
*/
atom_asic_init(rdev->mode_info.atom_context);
rv515_mc_resume(rdev, &save);
return 0;
}
 
int r600_gpu_reset(struct radeon_device *rdev)
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
struct r100_gpu_lockup *lockup;
int r;
 
if (rdev->family >= CHIP_RV770)
lockup = &rdev->config.rv770.lockup;
else
lockup = &rdev->config.r600.lockup;
 
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
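The two 0x80000000 writes above are bare type-2 CP packet headers (the driver's CP_PACKET2 constant), i.e. ring NOPs; they exist only to force CP activity so a wedged command processor can be told apart from an idle one. A sketch of the header encoding (the name below is illustrative):

/* Type-2 CP packet: bits 31:30 = 2, no payload; a ring no-op. */
#define CP_PACKET2_SKETCH	(2u << 30)	/* == 0x80000000 */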
 
int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
}
 
1095,7 → 1148,10
rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE(0);
tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
rdev->config.r600.tiling_group_size = 512;
else
rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
1122,7 → 1178,7
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
 
rdev->config.r600.tile_config = tiling_config;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1131,6 → 1187,7
/* Setup pipes */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1404,7 → 1461,9
*/
void r600_cp_stop(struct radeon_device *rdev)
{
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
}
 
int r600_init_microcode(struct radeon_device *rdev)
1468,10 → 1527,35
chip_name = "RV710";
rlc_chip_name = "R700";
break;
case CHIP_CEDAR:
chip_name = "CEDAR";
rlc_chip_name = "CEDAR";
break;
case CHIP_REDWOOD:
chip_name = "REDWOOD";
rlc_chip_name = "REDWOOD";
break;
case CHIP_JUNIPER:
chip_name = "JUNIPER";
rlc_chip_name = "JUNIPER";
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
chip_name = "CYPRESS";
rlc_chip_name = "CYPRESS";
break;
case CHIP_PALM:
chip_name = "PALM";
rlc_chip_name = "SUMO";
break;
default: BUG();
}
 
if (rdev->family >= CHIP_RV770) {
if (rdev->family >= CHIP_CEDAR) {
pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
} else if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1545,7 → 1629,11
 
r600_cp_stop(rdev);
 
WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
BUF_SWAP_32BIT |
#endif
RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
/* Reset cp */
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
1585,12 → 1673,12
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
if (rdev->family < CHIP_RV770) {
if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
} else {
radeon_ring_write(rdev, 0x3);
radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
} else {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
}
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
1616,7 → 1704,7
 
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
1630,8 → 1718,23
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
#ifdef __BIG_ENDIAN
RB_RPTR_SWAP(2) |
#endif
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
if (rdev->wb.enabled)
WREG32(SCRATCH_UMSK, 0xff);
else {
tmp |= RB_NO_UPDATE;
WREG32(SCRATCH_UMSK, 0);
}
 
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
 
1668,7 → 1771,13
rdev->cp.align_mask = 16 - 1;
}
 
void r600_cp_fini(struct radeon_device *rdev)
{
r600_cp_stop(rdev);
radeon_ring_fini(rdev);
}
 
 
/*
* GPU scratch register helper functions.
*/
1677,9 → 1786,10
int i;
 
rdev->scratch.num_reg = 7;
rdev->scratch.reg_base = SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true;
rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
}
}
 
1722,13 → 1832,23
radeon_scratch_free(rdev, scratch);
return r;
}
 
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
/* Also consider EVENT_WRITE_EOP. It handles the interrupts + timestamps + events */
 
if (rdev->wb.use_event) {
u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(rdev, addr & 0xffffffff);
radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, 0);
} else {
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* wait for 3D idle clean */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
1741,6 → 1861,9
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
}
}
 
 
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
1754,28 → 1877,13
/* FIXME: implement */
}
 
 
bool r600_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
 
/* first check CRTCs */
reg = RREG32(D1CRTC_CONTROL) |
RREG32(D2CRTC_CONTROL);
if (reg & CRTC_EN)
return true;
 
/* then check MEM_SIZE, in case the crtcs are off */
if (RREG32(CONFIG_MEMSIZE))
return true;
 
return false;
}
 
int r600_startup(struct radeon_device *rdev)
{
int r;
 
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
 
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
1803,8 → 1911,7
r = r600_cp_resume(rdev);
if (r)
return r;
/* write back buffers are not vital so don't worry about failure */
// r600_wb_enable(rdev);
 
return 0;
}
 
1836,16 → 1943,13
{
int r;
 
r = radeon_dummy_page_init(rdev);
if (r)
return r;
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
// r = radeon_gem_init(rdev);
// if (r)
// return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
1860,7 → 1964,7
if (r)
return r;
/* Post card if necessary */
if (!r600_card_posted(rdev)) {
if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
1874,12 → 1978,10
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev);
if (r)
return r;
/* Initialize power management */
radeon_pm_init(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
// if (r)
// return r;
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
1932,7 → 2034,43
return 0;
}
 
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
 
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD2_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD3_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD4_INT_CONTROL, tmp);
if (ASIC_IS_DCE32(rdev)) {
tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
} else {
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
}
}
 
 
 
1940,6 → 2078,7
 
 
 
 
/*
* Debugfs info
*/
2007,5 → 2146,237
*/
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
* This seems to cause problems on some AGP cards. Just use the old
* method for them.
*/
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp;
 
WREG32(HDP_DEBUG1, 0);
tmp = readl((void __iomem *)ptr);
} else
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
 
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
u32 link_width_cntl, mask, target_reg;
 
if (rdev->flags & RADEON_IS_IGP)
return;
 
if (!(rdev->flags & RADEON_IS_PCIE))
return;
 
/* x2 cards have a special sequence */
if (ASIC_IS_X2(rdev))
return;
 
/* FIXME wait for idle */
 
switch (lanes) {
case 0:
mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
break;
case 1:
mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
break;
case 2:
mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
break;
case 4:
mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
break;
case 8:
mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
break;
case 12:
mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
break;
case 16:
default:
mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
break;
}
 
link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
 
if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
(mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
return;
 
if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
return;
 
link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
RADEON_PCIE_LC_RECONFIG_NOW |
R600_PCIE_LC_RENEGOTIATE_EN |
R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= mask;
 
WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
 
/* some northbridges can renegotiate the link rather than requiring
* a complete re-config.
* e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
*/
if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
else
link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
 
WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
RADEON_PCIE_LC_RECONFIG_NOW));
 
if (rdev->family >= CHIP_RV770)
target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
else
target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
 
/* wait for lane set to complete */
link_width_cntl = RREG32(target_reg);
while (link_width_cntl == 0xffffffff)
link_width_cntl = RREG32(target_reg);
 
}
 
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
u32 link_width_cntl;
 
if (rdev->flags & RADEON_IS_IGP)
return 0;
 
if (!(rdev->flags & RADEON_IS_PCIE))
return 0;
 
/* x2 cards have a special sequence */
if (ASIC_IS_X2(rdev))
return 0;
 
/* FIXME wait for idle */
 
link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
 
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
case RADEON_PCIE_LC_LINK_WIDTH_X0:
return 0;
case RADEON_PCIE_LC_LINK_WIDTH_X1:
return 1;
case RADEON_PCIE_LC_LINK_WIDTH_X2:
return 2;
case RADEON_PCIE_LC_LINK_WIDTH_X4:
return 4;
case RADEON_PCIE_LC_LINK_WIDTH_X8:
return 8;
case RADEON_PCIE_LC_LINK_WIDTH_X16:
default:
return 16;
}
}
 
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
 
if (radeon_pcie_gen2 == 0)
return;
 
if (rdev->flags & RADEON_IS_IGP)
return;
 
if (!(rdev->flags & RADEON_IS_PCIE))
return;
 
/* x2 cards have a special sequence */
if (ASIC_IS_X2(rdev))
return;
 
/* only RV6xx+ chips are supported */
if (rdev->family <= CHIP_R600)
return;
 
/* 55 nm r6xx asics */
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
/* advertise upconfig capability */
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
} else {
link_width_cntl |= LC_UPCONFIGURE_DIS;
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
 
/* 55 nm r6xx asics */
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
WREG32(MM_CFGREGS_CNTL, 0x8);
link_cntl2 = RREG32(0x4088);
WREG32(MM_CFGREGS_CNTL, 0);
/* not supported yet */
if (link_cntl2 & SELECTABLE_DEEMPHASIS)
return;
}
 
speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
 
tmp = RREG32(0x541c);
WREG32(0x541c, tmp | 0x8);
WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
link_cntl2 = RREG16(0x4088);
link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
link_cntl2 |= 0x2;
WREG16(0x4088, link_cntl2);
WREG32(MM_CFGREGS_CNTL, 0);
 
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
training_cntl &= ~LC_POINT_7_PLUS_EN;
WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
} else {
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
}
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
 
} else {
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
/drivers/video/drm/radeon/r600_audio.c
35,7 → 35,7
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
return rdev->family >= CHIP_R600
return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
44,7 → 44,7
/*
* current number of channels
*/
static int r600_audio_channels(struct radeon_device *rdev)
int r600_audio_channels(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
}
52,7 → 52,7
/*
* current bits per sample
*/
static int r600_audio_bits_per_sample(struct radeon_device *rdev)
int r600_audio_bits_per_sample(struct radeon_device *rdev)
{
uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
switch (value) {
63,7 → 63,8
case 0x4: return 32;
}
 
DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n",
(int)value);
 
return 16;
}
71,7 → 72,7
/*
* current sampling rate in HZ
*/
static int r600_audio_rate(struct radeon_device *rdev)
int r600_audio_rate(struct radeon_device *rdev)
{
uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
uint32_t result;
90,7 → 91,7
/*
* iec 60958 status bits
*/
static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
uint8_t r600_audio_status_bits(struct radeon_device *rdev)
{
return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
}
98,7 → 99,7
/*
* iec 60958 category code
*/
static uint8_t r600_audio_category_code(struct radeon_device *rdev)
uint8_t r600_audio_category_code(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
}
118,7 → 119,7
uint8_t category_code = r600_audio_category_code(rdev);
 
struct drm_encoder *encoder;
int changes = 0;
int changes = 0, still_going = 0;
 
changes |= channels != rdev->audio_channels;
changes |= rate != rdev->audio_rate;
135,11 → 136,10
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
still_going |= radeon_encoder->audio_polling_active;
if (changes || r600_hdmi_buffer_status_changed(encoder))
r600_hdmi_update_audio_settings(
encoder, channels,
rate, bps, status_bits,
category_code);
r600_hdmi_update_audio_settings(encoder);
}
 
// mod_timer(&rdev->audio_timer,
151,8 → 151,9
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling");
DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
rdev->audio_enabled = enable;
}
 
/*
182,41 → 183,6
}
 
/*
* determine how the encoders and the audio interface are wired together
*/
int r600_audio_tmds_index(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *other;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
return 0;
 
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
/* special case check if an TMDS1 is present */
list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
if (to_radeon_encoder(other)->encoder_id ==
ENCODER_OBJECT_ID_INTERNAL_TMDS1)
return 1;
}
return 0;
 
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
return 1;
 
default:
DRM_ERROR("Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return -1;
}
}
 
/*
* attach the audio codec to the clock source of the encoder
*/
void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
224,6 → 190,7
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int base_rate = 48000;
 
switch (radeon_encoder->encoder_id) {
231,7 → 198,6
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
break;
 
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
238,14 → 204,13
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
break;
 
default:
DRM_ERROR("Unsupported encoder type 0x%02X\n",
dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
 
switch (r600_audio_tmds_index(encoder)) {
switch (dig->dig_encoder) {
case 0:
WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
WREG32(R600_AUDIO_PLL1_DIV, clock*100);
257,6 → 222,10
WREG32(R600_AUDIO_PLL2_DIV, clock*100);
WREG32(R600_AUDIO_CLK_SRCSEL, 1);
break;
default:
dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
}
 
266,7 → 235,7
*/
void r600_audio_fini(struct radeon_device *rdev)
{
if (!radeon_audio || !r600_audio_chipset_supported(rdev))
if (!rdev->audio_enabled)
return;
 
// del_timer(&rdev->audio_timer);
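The helpers above all decode fields of the single R600_AUDIO_RATE_BPS_CHANNEL register: bits 2:0 hold channels - 1 and bits 7:4 a bits-per-sample code. A combined sketch of the decode (only the 32-bit code survives in the hunk above; anything unrecognized falls back to 16, as the driver does):

/* Illustrative combined decode of R600_AUDIO_RATE_BPS_CHANNEL. */
static void r600_audio_decode_sketch(struct radeon_device *rdev,
				     int *channels, int *bps)
{
	uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);

	*channels = (value & 0x7) + 1;		/* bits 2:0: channels - 1 */
	switch ((value & 0xF0) >> 4) {		/* bits 7:4: bps code */
	case 0x4:
		*bps = 32;
		break;
	default:
		*bps = 16;			/* driver's fallback */
		break;
	}
}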
/drivers/video/drm/radeon/r600_hdmi.c
26,6 → 26,7
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
 
/*
290,17 → 291,15
if (!offset)
return;
 
if (r600_hdmi_is_audio_buffer_filled(encoder)) {
/* disable audio workaround and start delivering of audio frames */
if (!radeon_encoder->hdmi_audio_workaround ||
r600_hdmi_is_audio_buffer_filled(encoder)) {
 
/* disable audio workaround */
WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
 
} else if (radeon_encoder->hdmi_audio_workaround) {
/* enable audio workaround and start delivering of audio frames */
} else {
/* enable audio workaround */
WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
 
} else {
/* disable audio workaround and stop delivering of audio frames */
WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
}
}
 
314,6 → 313,9
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
if (ASIC_IS_DCE4(rdev))
return;
 
if (!offset)
return;
 
332,7 → 334,7
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
 
/* it's unknown what these bits do excatly, but it's indeed quite usefull for debugging */
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
342,25 → 344,23
 
/* audio packets per line, does anyone know how to calc this ? */
WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
 
/* update? reset? don't really know */
WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
}
 
/*
* update settings with current parameters from audio engine
*/
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
int channels,
int rate,
int bps,
uint8_t status_bits,
uint8_t category_code)
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
int channels = r600_audio_channels(rdev);
int rate = r600_audio_rate(rdev);
int bps = r600_audio_bits_per_sample(rdev);
uint8_t status_bits = r600_audio_status_bits(rdev);
uint8_t category_code = r600_audio_category_code(rdev);
 
uint32_t iec;
 
if (!offset)
412,95 → 412,168
r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
 
r600_hdmi_audio_workaround(encoder);
}
 
/* update? reset? don't really know */
WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
static int r600_hdmi_find_free_block(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
bool free_blocks[3] = { true, true, true };
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->hdmi_offset) {
case R600_HDMI_BLOCK1:
free_blocks[0] = false;
break;
case R600_HDMI_BLOCK2:
free_blocks[1] = false;
break;
case R600_HDMI_BLOCK3:
free_blocks[2] = false;
break;
}
}
 
if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
rdev->family == CHIP_RS740) {
return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
} else if (rdev->family >= CHIP_R600) {
if (free_blocks[0])
return R600_HDMI_BLOCK1;
else if (free_blocks[1])
return R600_HDMI_BLOCK2;
}
return 0;
}
 
static void r600_hdmi_assign_block(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
if (!dig) {
dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
return;
}
 
if (ASIC_IS_DCE4(rdev)) {
/* TODO */
} else if (ASIC_IS_DCE3(rdev)) {
radeon_encoder->hdmi_offset = dig->dig_encoder ?
R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
if (ASIC_IS_DCE32(rdev))
radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
} else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
}
}
 
/*
* enable/disable the HDMI engine
* enable the HDMI engine
*/
void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
void r600_hdmi_enable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
uint32_t offset;
 
if (!offset)
if (ASIC_IS_DCE4(rdev))
return;
 
DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
if (!radeon_encoder->hdmi_offset) {
r600_hdmi_assign_block(encoder);
if (!radeon_encoder->hdmi_offset) {
dev_warn(rdev->dev, "Could not find HDMI block for "
"0x%x encoder\n", radeon_encoder->encoder_id);
return;
}
}
 
/* some versions of atombios ignore the enable HDMI flag
* so enabling/disabling HDMI was moved here for TMDS1+2 */
offset = radeon_encoder->hdmi_offset;
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0x101);
break;
 
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0x105);
break;
 
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* This part is doubtful in my opinion */
WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
break;
 
default:
DRM_ERROR("unknown HDMI output type\n");
dev_err(rdev->dev, "Unknown HDMI output type\n");
break;
}
}
#if 0
if (rdev->irq.installed
&& rdev->family != CHIP_RS600
&& rdev->family != CHIP_RS690
&& rdev->family != CHIP_RS740) {
 
/* if irq is available use it */
rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
radeon_irq_set(rdev);
 
r600_audio_disable_polling(encoder);
} else {
/* if not, fall back to polling */
r600_audio_enable_polling(encoder);
}
#endif
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
}
 
/*
* determine at which register offset the HDMI encoder is
* disable the HDMI engine
*/
void r600_hdmi_init(struct drm_encoder *encoder)
void r600_hdmi_disable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset;
 
if (ASIC_IS_DCE4(rdev))
return;
 
offset = radeon_encoder->hdmi_offset;
if (!offset) {
dev_err(rdev->dev, "Disabling not enabled HDMI\n");
return;
}
 
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
offset, radeon_encoder->encoder_id);
 
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
 
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
switch (r600_audio_tmds_index(encoder)) {
case 0:
radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case 1:
radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
break;
default:
radeon_encoder->hdmi_offset = 0;
dev_err(rdev->dev, "Unknown HDMI output type\n");
break;
}
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
break;
}
 
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
radeon_encoder->hdmi_offset = R600_HDMI_DIG;
break;
 
default:
radeon_encoder->hdmi_offset = 0;
break;
radeon_encoder->hdmi_config_offset = 0;
}
 
DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
 
/* TODO: make this configurable */
radeon_encoder->hdmi_audio_workaround = 0;
}
/drivers/video/drm/radeon/r600_reg.h
81,11 → 81,16
#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720
#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724
 
#define R600_D1GRPH_SWAP_CONTROL 0x610C
# define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
# define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
# define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
# define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
 
 
#define R600_HDP_NONSURFACE_BASE 0x2c04
 
#define R600_BUS_CNTL 0x5420
# define R600_BIOS_ROM_DIS (1 << 1)
#define R600_CONFIG_CNTL 0x5424
#define R600_CONFIG_MEMSIZE 0x5428
#define R600_CONFIG_F0_BASE 0x542C
152,14 → 157,17
#define R600_AUDIO_STATUS_BITS 0x73d8
 
/* HDMI base register addresses */
#define R600_HDMI_TMDS1 0x7400
#define R600_HDMI_TMDS2 0x7700
#define R600_HDMI_DIG 0x7800
#define R600_HDMI_BLOCK1 0x7400
#define R600_HDMI_BLOCK2 0x7700
#define R600_HDMI_BLOCK3 0x7800
 
/* HDMI registers */
#define R600_HDMI_ENABLE 0x00
#define R600_HDMI_STATUS 0x04
# define R600_HDMI_INT_PENDING (1 << 29)
#define R600_HDMI_CNTL 0x08
# define R600_HDMI_INT_EN (1 << 28)
# define R600_HDMI_INT_ACK (1 << 29)
#define R600_HDMI_UNKNOWN_0 0x0C
#define R600_HDMI_AUDIOCNTL 0x10
#define R600_HDMI_VIDEOCNTL 0x14
185,4 → 193,8
#define R600_HDMI_AUDIO_DEBUG_2 0xe8
#define R600_HDMI_AUDIO_DEBUG_3 0xec
 
/* HDMI additional config base register addresses */
#define R600_HDMI_CONFIG1 0x7600
#define R600_HDMI_CONFIG2 0x7a00
 
#endif
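All HDMI registers above are offsets relative to one of the R600_HDMI_BLOCK* bases, which is why r600_hdmi.c stores a per-encoder hdmi_offset and accesses registers as WREG32(offset + reg). A sketch of the addressing (the helper name is illustrative):

/* Sketch: per-block HDMI register addressing, as used in r600_hdmi.c. */
static uint32_t r600_hdmi_read_status_sketch(struct radeon_device *rdev,
					     uint32_t block_base)
{
	/* e.g. block_base == R600_HDMI_BLOCK1 (0x7400), STATUS at +0x04 */
	return RREG32(block_base + R600_HDMI_STATUS);
}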
/drivers/video/drm/radeon/r600d.h
51,6 → 51,12
#define PTE_READABLE (1 << 5)
#define PTE_WRITEABLE (1 << 6)
 
/* tiling bits */
#define ARRAY_LINEAR_GENERAL 0x00000000
#define ARRAY_LINEAR_ALIGNED 0x00000001
#define ARRAY_1D_TILED_THIN1 0x00000002
#define ARRAY_2D_TILED_THIN1 0x00000004
 
/* Registers */
#define ARB_POP 0x2418
#define ENABLE_TC128 (1 << 30)
77,6 → 83,55
#define CB_COLOR0_FRAG 0x280e0
#define CB_COLOR0_MASK 0x28100
 
#define SQ_ALU_CONST_CACHE_PS_0 0x28940
#define SQ_ALU_CONST_CACHE_PS_1 0x28944
#define SQ_ALU_CONST_CACHE_PS_2 0x28948
#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
#define SQ_ALU_CONST_CACHE_PS_4 0x28950
#define SQ_ALU_CONST_CACHE_PS_5 0x28954
#define SQ_ALU_CONST_CACHE_PS_6 0x28958
#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
#define SQ_ALU_CONST_CACHE_PS_8 0x28960
#define SQ_ALU_CONST_CACHE_PS_9 0x28964
#define SQ_ALU_CONST_CACHE_PS_10 0x28968
#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
#define SQ_ALU_CONST_CACHE_PS_12 0x28970
#define SQ_ALU_CONST_CACHE_PS_13 0x28974
#define SQ_ALU_CONST_CACHE_PS_14 0x28978
#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
#define SQ_ALU_CONST_CACHE_VS_0 0x28980
#define SQ_ALU_CONST_CACHE_VS_1 0x28984
#define SQ_ALU_CONST_CACHE_VS_2 0x28988
#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
#define SQ_ALU_CONST_CACHE_VS_4 0x28990
#define SQ_ALU_CONST_CACHE_VS_5 0x28994
#define SQ_ALU_CONST_CACHE_VS_6 0x28998
#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
 
#define CONFIG_MEMSIZE 0x5428
#define CONFIG_CNTL 0x5424
#define CP_STAT 0x8680
106,6 → 161,7
#define BUF_SWAP_32BIT (2 << 16)
#define CP_RB_RPTR 0x8700
#define CP_RB_RPTR_ADDR 0xC10C
#define RB_RPTR_SWAP(x) ((x) << 0)
#define CP_RB_RPTR_ADDR_HI 0xC110
#define CP_RB_RPTR_WR 0xC108
#define CP_RB_WPTR 0xC114
190,6 → 246,11
#define GRBM_SOFT_RESET 0x8020
#define SOFT_RESET_CP (1<<0)
 
#define CG_THERMAL_STATUS 0x7F4
#define ASIC_T(x) ((x) << 0)
#define ASIC_T_MASK 0x1FF
#define ASIC_T_SHIFT 0
 
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
196,6 → 257,7
#define HDP_NONSURFACE_SIZE 0x2C0C
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
#define HDP_DEBUG1 0x2F34
 
#define MC_VM_AGP_TOP 0x2184
#define MC_VM_AGP_BOT 0x2188
419,6 → 481,7
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF
#define VGT_EVENT_INITIATOR 0x28a90
# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
 
#define VM_CONTEXT0_CNTL 0x1410
666,6 → 729,54
/* DCE 3.2 */
# define DC_HPDx_EN (1 << 28)
 
#define D1GRPH_INTERRUPT_STATUS 0x6158
#define D2GRPH_INTERRUPT_STATUS 0x6958
# define DxGRPH_PFLIP_INT_OCCURRED (1 << 0)
# define DxGRPH_PFLIP_INT_CLEAR (1 << 8)
#define D1GRPH_INTERRUPT_CONTROL 0x615c
#define D2GRPH_INTERRUPT_CONTROL 0x695c
# define DxGRPH_PFLIP_INT_MASK (1 << 0)
# define DxGRPH_PFLIP_INT_TYPE (1 << 8)
 
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
# define LC_POINT_7_PLUS_EN (1 << 6)
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_SHIFT 0
# define LC_LINK_WIDTH_MASK 0x7
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
# define LC_LINK_WIDTH_RD_SHIFT 4
# define LC_LINK_WIDTH_RD_MASK 0x70
# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
# define LC_RECONFIG_NOW (1 << 8)
# define LC_RENEGOTIATION_SUPPORT (1 << 9)
# define LC_RENEGOTIATE_EN (1 << 10)
# define LC_SHORT_RECONFIG_EN (1 << 11)
# define LC_UPCONFIGURE_SUPPORT (1 << 12)
# define LC_UPCONFIGURE_DIS (1 << 13)
#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
# define LC_GEN2_EN_STRAP (1 << 0)
# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
# define LC_CURRENT_DATA_RATE (1 << 11)
# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
#define MM_CFGREGS_CNTL 0x544c
# define MM_WR_TO_CFG_EN (1 << 3)
#define LINK_CNTL2 0x88 /* F0 */
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
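
/* Hedged example of the read-modify-write pattern these fields imply
 * (the driver's rv370_set_pcie_lanes does something similar): program
 * the requested width into the LC_LINK_WIDTH field, then kick the
 * retrain with LC_RECONFIG_NOW. RREG32_PCIE_P/WREG32_PCIE_P name the
 * PCIE_P accessors and are an assumption in this sketch.
 */
#if 0
static void set_pcie_x8_sketch(struct radeon_device *rdev)
{
	u32 lc = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);

	lc &= ~LC_LINK_WIDTH_MASK;
	lc |= LC_LINK_WIDTH_X8;			/* request an x8 link */
	WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, lc | LC_RECONFIG_NOW);
}
#endif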
 
/*
* PM4
*/
720,7 → 831,27
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
#define EVENT_TYPE(x) ((x) << 0)
#define EVENT_INDEX(x) ((x) << 8)
/* 0 - any non-TS event
* 1 - ZPASS_DONE
* 2 - SAMPLE_PIPELINESTAT
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
* 5 - TS events
*/
#define PACKET3_EVENT_WRITE_EOP 0x47
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
* 2 - send 64bit data
* 3 - send 64bit counter value
*/
#define INT_SEL(x) ((x) << 24)
/* 0 - none
* 1 - interrupt only (DATA_SEL = 0)
* 2 - interrupt when data write is confirmed
*/
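
/* Sketch (an assumption here, mirroring what the r600 fence path does
 * with these fields): emit an EVENT_WRITE_EOP that flushes caches (a TS
 * event, hence EVENT_INDEX(5)), writes the 32-bit fence sequence number
 * (DATA_SEL(1)) and raises an interrupt once the write has landed
 * (INT_SEL(2)). PACKET3() and radeon_ring_write() are the driver's ring
 * helpers, assumed available at this point.
 */
#if 0
static void emit_eop_fence_sketch(struct radeon_device *rdev, u64 addr, u32 seq)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(rdev, addr & 0xffffffff);	/* low 32 bits of fence address */
	radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(rdev, seq);			/* value written at addr */
	radeon_ring_write(rdev, 0);
}
#endif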
#define PACKET3_ONE_REG_WRITE 0x57
#define PACKET3_SET_CONFIG_REG 0x68
#define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000
1105,6 → 1236,10
#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
#define C_038000_TILE_MODE 0xFFFFFF87
#define V_038000_ARRAY_LINEAR_GENERAL 0x00000000
#define V_038000_ARRAY_LINEAR_ALIGNED 0x00000001
#define V_038000_ARRAY_1D_TILED_THIN1 0x00000002
#define V_038000_ARRAY_2D_TILED_THIN1 0x00000004
#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
#define C_038000_TILE_TYPE 0xFFFFFF7F
1169,6 → 1304,14
#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
#define V_038004_FMT_32_32_32 0x0000002F
#define V_038004_FMT_32_32_32_FLOAT 0x00000030
#define V_038004_FMT_BC1 0x00000031
#define V_038004_FMT_BC2 0x00000032
#define V_038004_FMT_BC3 0x00000033
#define V_038004_FMT_BC4 0x00000034
#define V_038004_FMT_BC5 0x00000035
#define V_038004_FMT_BC6 0x00000036
#define V_038004_FMT_BC7 0x00000037
#define V_038004_FMT_32_AS_32_32_32_32 0x00000038
#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
1308,6 → 1451,8
#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
#define C_028010_ARRAY_MODE 0xFFF87FFF
#define V_028010_ARRAY_1D_TILED_THIN1 0x00000002
#define V_028010_ARRAY_2D_TILED_THIN1 0x00000004
#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
/drivers/video/drm/radeon/radeon.h
60,7 → 60,6
* are considered as fatal)
*/
 
#include <ddk.h>
#include <asm/atomic.h>
 
#include <linux/list.h>
83,6 → 82,8
 
#include <syscall.h>
 
extern unsigned long volatile jiffies;
 
/*
* Module parameters.
*/
97,10 → 98,10
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
extern int radeon_dynpm;
extern int radeon_audio;
 
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
typedef struct pm_message {
int event;
} pm_message_t;
160,11 → 161,29
#define writeq __raw_writeq
 
 
static inline u32 ioread32(const volatile void __iomem *addr)
{
return in32((u32)addr);
}
 
static inline void iowrite32(uint32_t b, volatile void __iomem *addr)
{
out32((u32)addr, b);
}
 
struct __wait_queue_head {
spinlock_t lock;
struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
 
 
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
* symbols.
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
230,6 → 249,7
uint32_t default_sclk;
uint32_t default_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
 
/*
236,9 → 256,18
* Power management
*/
int radeon_pm_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
 
/*
* Fences.
247,8 → 276,9
uint32_t scratch_reg;
atomic_t seq;
uint32_t last_seq;
unsigned long count_timeout;
// wait_queue_head_t queue;
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
struct list_head emited;
262,7 → 292,6
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
unsigned long timeout;
bool emited;
bool signaled;
};
293,7 → 322,7
*/
struct radeon_mman {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_global_reference mem_global_ref;
// struct drm_global_reference mem_global_ref;
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
315,12 → 344,12
int surface_reg;
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object *gobj;
struct drm_gem_object gem_base;
u32 domain;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
struct radeon_bo_list {
struct list_head list;
struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned rdomain;
377,6 → 406,7
union radeon_gart_table table;
struct page **pages;
dma_addr_t *pages_addr;
bool *ttm_alloced;
bool ready;
};
 
413,6 → 443,7
int vram_mtrr;
bool vram_is_ddr;
bool igp_sideport_enabled;
u64 gtt_base_align;
};
 
bool radeon_combios_sideport_present(struct radeon_device *rdev);
423,6 → 454,7
*/
struct radeon_scratch {
unsigned num_reg;
uint32_t reg_base;
bool free[32];
uint32_t reg[32];
};
434,15 → 466,58
/*
* IRQS.
*/
struct r500_irq_stat_regs {
u32 disp_int;
};
 
struct r600_irq_stat_regs {
u32 disp_int;
u32 disp_int_cont;
u32 disp_int_cont2;
u32 d1grph_int;
u32 d2grph_int;
};
 
struct evergreen_irq_stat_regs {
u32 disp_int;
u32 disp_int_cont;
u32 disp_int_cont2;
u32 disp_int_cont3;
u32 disp_int_cont4;
u32 disp_int_cont5;
u32 d1grph_int;
u32 d2grph_int;
u32 d3grph_int;
u32 d4grph_int;
u32 d5grph_int;
u32 d6grph_int;
};
 
union radeon_irq_stat_regs {
struct r500_irq_stat_regs r500;
struct r600_irq_stat_regs r600;
struct evergreen_irq_stat_regs evergreen;
};
 
struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define for max crtcs rather than hardcoding it */
bool crtc_vblank_int[2];
bool crtc_vblank_int[6];
bool pflip[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
bool gui_idle;
bool gui_idle_acked;
wait_queue_head_t idle_queue;
/* FIXME: use defines for max HDMI blocks */
bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
union radeon_irq_stat_regs stat_regs;
spinlock_t pflip_lock[6];
int pflip_refcount[6];
};
 
int radeon_irq_kms_init(struct radeon_device *rdev);
528,7 → 603,9
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
632,6 → 709,7
*/
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);
 
 
642,8 → 720,17
struct radeon_bo *wb_obj;
volatile uint32_t *wb;
uint64_t gpu_addr;
bool enabled;
bool use_event;
};
 
#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
#define R600_WB_IH_WPTR_OFFSET 2048
#define R600_WB_EVENT_OFFSET 3072
 
/**
* struct radeon_pm - power management data
* @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
655,26 → 742,34
* @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
* @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
* @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
* @sclk: GPU clock Mhz (core bandwith depends of this clock)
* @sclk: GPU clock MHz (core bandwidth depends on this clock)
* @needed_bandwidth: current bandwidth needs
*
* It keeps track of the various data needed to make power management decisions.
* Bandwith need is used to determine minimun clock of the GPU and memory.
* Bandwidth need is used to determine the minimum clock of the GPU and memory.
* The equation between gpu/memory clock and available bandwidth is hw dependent
* (type of memory, bus size, efficiency, ...)
*/
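
/* Illustrative example only (the numbers below are assumptions; the real
 * relation is asic specific): DDR memory on a 128-bit bus at
 * mclk = 400 MHz with ~70% efficiency gives roughly
 *   400 MHz * 2 (DDR) * 128/8 bytes * 0.7 ~= 8960 MByte/s
 * of available bandwidth, which is then compared against
 * needed_bandwidth to pick the minimum acceptable clocks.
 */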
enum radeon_pm_state {
PM_STATE_DISABLED,
PM_STATE_MINIMUM,
PM_STATE_PAUSED,
PM_STATE_ACTIVE
 
enum radeon_pm_method {
PM_METHOD_PROFILE,
PM_METHOD_DYNPM,
};
enum radeon_pm_action {
PM_ACTION_NONE,
PM_ACTION_MINIMUM,
PM_ACTION_DOWNCLOCK,
PM_ACTION_UPCLOCK
 
enum radeon_dynpm_state {
DYNPM_STATE_DISABLED,
DYNPM_STATE_MINIMUM,
DYNPM_STATE_PAUSED,
DYNPM_STATE_ACTIVE,
DYNPM_STATE_SUSPENDED,
};
enum radeon_dynpm_action {
DYNPM_ACTION_NONE,
DYNPM_ACTION_MINIMUM,
DYNPM_ACTION_DOWNCLOCK,
DYNPM_ACTION_UPCLOCK,
DYNPM_ACTION_DEFAULT
};
 
enum radeon_voltage_type {
VOLTAGE_NONE = 0,
691,13 → 786,39
POWER_STATE_TYPE_PERFORMANCE,
};
 
enum radeon_pm_clock_mode_type {
POWER_MODE_TYPE_DEFAULT,
POWER_MODE_TYPE_LOW,
POWER_MODE_TYPE_MID,
POWER_MODE_TYPE_HIGH,
enum radeon_pm_profile_type {
PM_PROFILE_DEFAULT,
PM_PROFILE_AUTO,
PM_PROFILE_LOW,
PM_PROFILE_MID,
PM_PROFILE_HIGH,
};
 
#define PM_PROFILE_DEFAULT_IDX 0
#define PM_PROFILE_LOW_SH_IDX 1
#define PM_PROFILE_MID_SH_IDX 2
#define PM_PROFILE_HIGH_SH_IDX 3
#define PM_PROFILE_LOW_MH_IDX 4
#define PM_PROFILE_MID_MH_IDX 5
#define PM_PROFILE_HIGH_MH_IDX 6
#define PM_PROFILE_MAX 7
 
struct radeon_pm_profile {
int dpms_off_ps_idx;
int dpms_on_ps_idx;
int dpms_off_cm_idx;
int dpms_on_cm_idx;
};
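
/* Hedged sketch of how the profile index table above is used (the real
 * selection logic lives in radeon_pm.c; the helper name is hypothetical):
 * low/mid/high requests pick the single-head (SH) or multi-head (MH)
 * variant depending on the active crtc count.
 */
#if 0
static void pm_pick_high_profile_sketch(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtc_count > 1)
		rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
	else
		rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
}
#endif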
 
enum radeon_int_thermal_type {
THERMAL_TYPE_NONE,
THERMAL_TYPE_RV6XX,
THERMAL_TYPE_RV770,
THERMAL_TYPE_EVERGREEN,
THERMAL_TYPE_SUMO,
THERMAL_TYPE_NI,
};
 
struct radeon_voltage {
enum radeon_voltage_type type;
/* gpio voltage */
709,15 → 830,13
u8 vddci_id; /* index into vddci voltage table */
bool vddci_enabled;
/* r6xx+ sw */
u32 voltage;
u16 voltage;
/* evergreen+ vddci */
u16 vddci;
};
 
struct radeon_pm_non_clock_info {
/* pcie lanes */
int pcie_lanes;
/* standardized non-clock flags */
u32 flags;
};
/* clock mode flags */
#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
 
struct radeon_pm_clock_info {
/* memory clock */
726,10 → 845,13
u32 sclk;
/* voltage info */
struct radeon_voltage voltage;
/* standardized clock flags - not sure we'll need these */
/* standardized clock flags */
u32 flags;
};
 
/* state flags */
#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
 
struct radeon_power_state {
enum radeon_pm_state_type type;
/* XXX: use a define for num clock modes */
737,9 → 859,11
/* number of valid clock modes in this power state */
int num_clock_modes;
struct radeon_pm_clock_info *default_clock_mode;
/* non clock info about this state */
struct radeon_pm_non_clock_info non_clock_info;
bool voltage_drop_active;
/* standardized state flags */
u32 flags;
u32 misc; /* vbios specific flags */
u32 misc2; /* vbios specific flags */
int pcie_lanes; /* pcie lanes */
};
 
/*
749,13 → 873,11
 
struct radeon_pm {
struct mutex mutex;
// struct delayed_work idle_work;
enum radeon_pm_state state;
enum radeon_pm_action planned_action;
unsigned long action_timeout;
bool downclocked;
int active_crtcs;
u32 active_crtcs;
int active_crtc_count;
int req_vblank;
bool vblank_sync;
bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
766,16 → 888,41
fixed20_12 ht_bandwidth;
fixed20_12 core_bandwidth;
fixed20_12 sclk;
fixed20_12 mclk;
fixed20_12 needed_bandwidth;
/* XXX: use a define for num power modes */
struct radeon_power_state power_state[8];
struct radeon_power_state *power_state;
/* number of valid power states */
int num_power_states;
struct radeon_power_state *current_power_state;
struct radeon_pm_clock_info *current_clock_mode;
struct radeon_power_state *requested_power_state;
struct radeon_pm_clock_info *requested_clock_mode;
struct radeon_power_state *default_power_state;
int current_power_state_index;
int current_clock_mode_index;
int requested_power_state_index;
int requested_clock_mode_index;
int default_power_state_index;
u32 current_sclk;
u32 current_mclk;
u16 current_vddc;
u16 current_vddci;
u32 default_sclk;
u32 default_mclk;
u16 default_vddc;
u16 default_vddci;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;
/* dynpm power management */
// struct delayed_work dynpm_idle_work;
enum radeon_dynpm_state dynpm_state;
enum radeon_dynpm_action dynpm_planned_action;
unsigned long dynpm_action_timeout;
bool dynpm_can_upclock;
bool dynpm_can_downclock;
/* profile-based power management */
enum radeon_pm_profile_type profile;
int profile_index;
struct radeon_pm_profile profiles[PM_PROFILE_MAX];
/* internal thermal controller on rv6xx+ */
enum radeon_int_thermal_type int_thermal_type;
struct device *int_hwmon_dev;
};
 
/*
787,7 → 934,8
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
int (*gpu_reset)(struct radeon_device *rdev);
bool (*gpu_is_lockup)(struct radeon_device *rdev);
int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
827,7 → 975,7
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev);
840,15 → 988,32
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
bool (*gui_idle)(struct radeon_device *rdev);
/* power management */
void (*pm_misc)(struct radeon_device *rdev);
void (*pm_prepare)(struct radeon_device *rdev);
void (*pm_finish)(struct radeon_device *rdev);
void (*pm_init_profile)(struct radeon_device *rdev);
void (*pm_get_dynpm_state)(struct radeon_device *rdev);
/* pageflipping */
void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
void (*post_page_flip)(struct radeon_device *rdev, int crtc);
};
 
/*
* Asic structures
*/
struct r100_gpu_lockup {
unsigned long last_jiffies;
u32 last_cp_rptr;
};
 
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
 
struct r300_asic {
856,6 → 1021,7
unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
 
struct r600_asic {
875,6 → 1041,8
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
struct r100_gpu_lockup lockup;
};
 
struct rv770_asic {
898,20 → 1066,97
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
struct r100_gpu_lockup lockup;
};
 
struct evergreen_asic {
unsigned num_ses;
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
unsigned max_backends;
unsigned max_gprs;
unsigned max_threads;
unsigned max_stack_entries;
unsigned max_hw_contexts;
unsigned max_gs_threads;
unsigned sx_max_export_size;
unsigned sx_max_export_pos_size;
unsigned sx_max_export_smx_size;
unsigned sq_num_cf_insts;
unsigned sx_num_of_sets;
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_size;
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
struct r100_gpu_lockup lockup;
};
 
struct cayman_asic {
unsigned max_shader_engines;
unsigned max_pipes_per_simd;
unsigned max_tile_pipes;
unsigned max_simds_per_se;
unsigned max_backends_per_se;
unsigned max_texture_channel_caches;
unsigned max_gprs;
unsigned max_threads;
unsigned max_gs_threads;
unsigned max_stack_entries;
unsigned sx_num_of_sets;
unsigned sx_max_export_size;
unsigned sx_max_export_pos_size;
unsigned sx_max_export_smx_size;
unsigned max_hw_contexts;
unsigned sq_num_cf_insts;
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_size;
 
unsigned num_shader_engines;
unsigned num_shader_pipes_per_simd;
unsigned num_tile_pipes;
unsigned num_simds_per_se;
unsigned num_backends_per_se;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
unsigned mem_max_burst_length_bytes;
unsigned mem_row_size_in_kb;
unsigned shader_engine_tile_size;
unsigned num_gpus;
unsigned multi_gpu_tile_size;
 
unsigned tile_config;
struct r100_gpu_lockup lockup;
};
 
union radeon_asic_config {
struct r300_asic r300;
struct r100_asic r100;
struct r600_asic r600;
struct rv770_asic rv770;
struct evergreen_asic evergreen;
struct cayman_asic cayman;
};
 
 
/*
* ASIC initialization from radeon_asic.c
*/
void radeon_agp_disable(struct radeon_device *rdev);
int radeon_asic_init(struct radeon_device *rdev);
 
 
 
/* VRAM scratch page for HDP bug */
struct r700_vram_scratch {
struct radeon_bo *robj;
volatile uint32_t *ptr;
};
 
/*
* Core structure, functions and helpers.
937,12 → 1182,9
bool is_atom_bios;
uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
struct fb_info *fbdev_info;
struct radeon_bo *fbdev_rbo;
struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
unsigned long rmmio_base;
unsigned long rmmio_size;
resource_size_t rmmio_base;
resource_size_t rmmio_size;
void *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
951,6 → 1193,9
uint32_t pcie_reg_mask;
radeon_rreg_t pciep_rreg;
radeon_wreg_t pciep_wreg;
/* io port */
void __iomem *rio_mem;
resource_size_t rio_mem_size;
struct radeon_clock clock;
struct radeon_mc mc;
struct radeon_gart gart;
959,8 → 1204,11
struct radeon_mman mman;
struct radeon_fence_driver fence_drv;
struct radeon_cp cp;
/* cayman compute rings */
struct radeon_cp cp1;
struct radeon_cp cp2;
struct radeon_ib_pool ib_pool;
// struct radeon_irq irq;
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
struct radeon_pm pm;
977,12 → 1225,19
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
const struct firmware *mc_fw; /* NI MC firmware */
struct r600_blit r600_blit;
struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
// struct r600_ih ih; /* r6/700 interrupt ring */
// struct workqueue_struct *wq;
// struct work_struct hotplug_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
struct mutex vram_mutex;
 
/* audio stuff */
bool audio_enabled;
// struct timer_list audio_timer;
int audio_channels;
int audio_rate;
990,7 → 1245,9
uint8_t audio_status_bits;
uint8_t audio_category_code;
 
bool powered_down;
 
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
};
 
int radeon_device_init(struct radeon_device *rdev,
1000,13 → 1257,6
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes);
 
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
1027,6 → 1277,26
}
}
 
static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
return ioread32(rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
return ioread32(rdev->rio_mem + RADEON_MM_DATA);
}
}
 
static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
if (reg < rdev->rio_mem_size)
iowrite32(v, rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
}
}
 
/*
* Cast helper
*/
1037,6 → 1307,8
*/
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1063,6 → 1335,8
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
 
/*
* Indirect register accessors
1106,10 → 1380,25
(rdev->family == CHIP_RV410) || \
(rdev->family == CHIP_RS400) || \
(rdev->family == CHIP_RS480))
#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
(rdev->ddev->pdev->device == 0x9443) || \
(rdev->ddev->pdev->device == 0x944B) || \
(rdev->ddev->pdev->device == 0x9506) || \
(rdev->ddev->pdev->device == 0x9509) || \
(rdev->ddev->pdev->device == 0x950F) || \
(rdev->ddev->pdev->device == 0x689C) || \
(rdev->ddev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
(rdev->family == CHIP_RS690) || \
(rdev->family == CHIP_RS740) || \
(rdev->family >= CHIP_R600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
(rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
 
/*
* BIOS helpers.
1150,7 → 1439,8
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
1178,9 → 1468,19
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
 
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
1187,10 → 1487,13
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern int radeon_clocks_init(struct radeon_device *rdev);
extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_wb_fini(struct radeon_device *rdev);
extern int radeon_wb_init(struct radeon_device *rdev);
extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
1201,160 → 1504,25
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
u32 GENMO_WT;
u32 CRTC_EXT_CNTL;
u32 CRTC_GEN_CNTL;
u32 CRTC2_GEN_CNTL;
u32 CUR_OFFSET;
u32 CUR2_OFFSET;
};
extern void r100_cp_disable(struct radeon_device *rdev);
extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
extern void r100_cp_fini(struct radeon_device *rdev);
extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
extern int r100_pci_gart_init(struct radeon_device *rdev);
extern void r100_pci_gart_fini(struct radeon_device *rdev);
extern int r100_pci_gart_enable(struct radeon_device *rdev);
extern void r100_pci_gart_disable(struct radeon_device *rdev);
extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
extern int r100_debugfs_mc_info_init(struct radeon_device *rdev);
extern int r100_gui_wait_for_idle(struct radeon_device *rdev);
extern void r100_ib_fini(struct radeon_device *rdev);
extern int r100_ib_init(struct radeon_device *rdev);
extern void r100_irq_disable(struct radeon_device *rdev);
extern int r100_irq_set(struct radeon_device *rdev);
extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
extern void r100_vram_init_sizes(struct radeon_device *rdev);
extern void r100_wb_disable(struct radeon_device *rdev);
extern void r100_wb_fini(struct radeon_device *rdev);
extern int r100_wb_init(struct radeon_device *rdev);
extern void r100_hdp_reset(struct radeon_device *rdev);
extern int r100_rb2d_reset(struct radeon_device *rdev);
extern int r100_cp_reset(struct radeon_device *rdev);
extern void r100_vga_render_disable(struct radeon_device *rdev);
extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
struct radeon_bo *robj);
extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
radeon_packet0_check_t check);
extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx);
extern void r100_enable_bm(struct radeon_device *rdev);
extern void r100_set_common_regs(struct radeon_device *rdev);
/*
* r600 functions used by radeon_encoder.c
*/
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
 
/* r300,r350,rv350,rv370,rv380 */
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern void r300_mc_program(struct radeon_device *rdev);
extern void r300_mc_init(struct radeon_device *rdev);
extern void r300_clock_startup(struct radeon_device *rdev);
extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
extern int rv370_pcie_gart_init(struct radeon_device *rdev);
extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
#endif
 
/* r420,r423,rv410 */
extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
extern void r420_pipes_init(struct radeon_device *rdev);
 
/* rv515 */
struct rv515_mc_save {
u32 d1vga_control;
u32 d2vga_control;
u32 vga_render_control;
u32 vga_hdp_control;
u32 d1crtc_control;
u32 d2crtc_control;
};
extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
extern void rv515_vga_render_disable(struct radeon_device *rdev);
extern void rv515_set_safe_registers(struct radeon_device *rdev);
extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
extern void rv515_clock_startup(struct radeon_device *rdev);
extern void rv515_debugfs(struct radeon_device *rdev);
extern int rv515_suspend(struct radeon_device *rdev);
 
/* rs400 */
extern int rs400_gart_init(struct radeon_device *rdev);
extern int rs400_gart_enable(struct radeon_device *rdev);
extern void rs400_gart_adjust_size(struct radeon_device *rdev);
extern void rs400_gart_disable(struct radeon_device *rdev);
extern void rs400_gart_fini(struct radeon_device *rdev);
 
/* rs600 */
extern void rs600_set_safe_registers(struct radeon_device *rdev);
extern int rs600_irq_set(struct radeon_device *rdev);
extern void rs600_irq_disable(struct radeon_device *rdev);
 
/* rs690, rs740 */
extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2);
 
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int r600_ib_test(struct radeon_device *rdev);
extern int r600_ring_test(struct radeon_device *rdev);
extern void r600_wb_fini(struct radeon_device *rdev);
extern int r600_wb_enable(struct radeon_device *rdev);
extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
extern int r600_init_microcode(struct radeon_device *rdev);
extern int r600_gpu_reset(struct radeon_device *rdev);
/* r600 irq */
extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
extern void r600_irq_suspend(struct radeon_device *rdev);
/* r600 audio */
extern int r600_audio_init(struct radeon_device *rdev);
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
extern void r600_audio_fini(struct radeon_device *rdev);
extern void r600_hdmi_init(struct drm_encoder *encoder);
extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
int channels,
int rate,
int bps,
uint8_t status_bits,
uint8_t category_code);
 
/* evergreen */
struct evergreen_mc_save {
u32 vga_control[6];
u32 vga_render_control;
u32 vga_hdp_control;
u32 crtc_control[6];
};
 
#include "radeon_object.h"
 
#define DRM_UDELAY(d) udelay(d)
1368,4 → 1536,23
videomode_t *mode, bool strict);
 
 
 
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
 
/*
* The first word is the work queue pointer and the flags rolled into
* one
*/
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
 
struct work_struct {
atomic_long_t data;
#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
struct list_head entry;
work_func_t func;
};
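
/* A small sketch, assuming the mask layout above: the low flag bits and
 * the workqueue data pointer share one word, so each half is recovered
 * by masking. Both helper names are hypothetical, for illustration only.
 */
#if 0
static inline unsigned long work_flags_sketch(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_FLAG_MASK;
}

static inline void *work_wq_data_sketch(struct work_struct *work)
{
	return (void *)(*work_data_bits(work) & WORK_STRUCT_WQ_DATA_MASK);
}
#endif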
 
#endif
/drivers/video/drm/radeon/radeon_agp.c
134,13 → 134,11
int ret;
 
/* Acquire AGP. */
if (!rdev->ddev->agp->acquired) {
ret = drm_agp_acquire(rdev->ddev);
if (ret) {
DRM_ERROR("Unable to acquire AGP: %d\n", ret);
return ret;
}
}
 
ret = drm_agp_info(rdev->ddev, &info);
if (ret) {
158,7 → 156,13
}
 
mode.mode = info.mode;
/* Chips with the AGP-to-PCIE bridge don't have the AGP_STATUS register.
* Just use whatever mode the host sets up.
*/
if (rdev->family <= CHIP_RV350)
agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
else
agp_status = mode.mode;
is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
 
if (is_v3) {
272,3 → 276,8
}
#endif
}
 
void radeon_agp_suspend(struct radeon_device *rdev)
{
radeon_agp_fini(rdev);
}
/drivers/video/drm/radeon/radeon_asic.c
0,0 → 1,752
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
 
//#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
//#include <linux/vgaarb.h>
//#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
 
/*
* Register accessor functions.
*/
static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
BUG_ON(1);
return 0;
}
 
static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
reg, v);
BUG_ON(1);
}
 
static void radeon_register_accessor_init(struct radeon_device *rdev)
{
rdev->mc_rreg = &radeon_invalid_rreg;
rdev->mc_wreg = &radeon_invalid_wreg;
rdev->pll_rreg = &radeon_invalid_rreg;
rdev->pll_wreg = &radeon_invalid_wreg;
rdev->pciep_rreg = &radeon_invalid_rreg;
rdev->pciep_wreg = &radeon_invalid_wreg;
 
/* Don't change the order, as we are overriding accessors. */
if (rdev->family < CHIP_RV515) {
rdev->pcie_reg_mask = 0xff;
} else {
rdev->pcie_reg_mask = 0x7ff;
}
/* FIXME: not sure here */
if (rdev->family <= CHIP_R580) {
rdev->pll_rreg = &r100_pll_rreg;
rdev->pll_wreg = &r100_pll_wreg;
}
if (rdev->family >= CHIP_R420) {
rdev->mc_rreg = &r420_mc_rreg;
rdev->mc_wreg = &r420_mc_wreg;
}
if (rdev->family >= CHIP_RV515) {
rdev->mc_rreg = &rv515_mc_rreg;
rdev->mc_wreg = &rv515_mc_wreg;
}
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
rdev->mc_rreg = &rs400_mc_rreg;
rdev->mc_wreg = &rs400_mc_wreg;
}
if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
rdev->mc_rreg = &rs690_mc_rreg;
rdev->mc_wreg = &rs690_mc_wreg;
}
if (rdev->family == CHIP_RS600) {
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
}
 
 
/* helper to disable agp */
void radeon_agp_disable(struct radeon_device *rdev)
{
rdev->flags &= ~RADEON_IS_AGP;
if (rdev->family >= CHIP_R600) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
} else if (rdev->family >= CHIP_RV515 ||
rdev->family == CHIP_RV380 ||
rdev->family == CHIP_RV410 ||
rdev->family == CHIP_R423) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
 
/*
* ASIC
*/
static struct radeon_asic r100_asic = {
.init = &r100_init,
// .fini = &r100_fini,
// .suspend = &r100_suspend,
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
// .cs_parse = &r100_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = NULL,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic r200_asic = {
.init = &r100_init,
// .fini = &r100_fini,
// .suspend = &r100_suspend,
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
// .cs_parse = &r100_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = NULL,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic r300_asic = {
.init = &r300_init,
// .fini = &r300_fini,
// .suspend = &r300_suspend,
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic r300_asic_pcie = {
.init = &r300_init,
// .fini = &r300_fini,
// .suspend = &r300_suspend,
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic r420_asic = {
.init = &r420_init,
// .fini = &r420_fini,
// .suspend = &r420_suspend,
// .resume = &r420_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic rs400_asic = {
.init = &rs400_init,
// .fini = &rs400_fini,
// .suspend = &rs400_suspend,
// .resume = &rs400_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic rs600_asic = {
.init = &rs600_init,
// .fini = &rs600_fini,
// .suspend = &rs600_suspend,
// .resume = &rs600_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &rs600_irq_set,
// .irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs600_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic rs690_asic = {
.init = &rs690_init,
// .fini = &rs690_fini,
// .suspend = &rs690_suspend,
// .resume = &rs690_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &rs600_irq_set,
// .irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r300_copy_dma,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic rv515_asic = {
.init = &rv515_init,
// .fini = &rv515_fini,
// .suspend = &rv515_suspend,
// .resume = &rv515_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &rs600_irq_set,
// .irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic r520_asic = {
.init = &r520_init,
// .fini = &rv515_fini,
// .suspend = &rv515_suspend,
// .resume = &r520_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &rs600_irq_set,
// .irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
static struct radeon_asic r600_asic = {
.init = &r600_init,
// .fini = &r600_fini,
// .suspend = &r600_suspend,
// .resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
// .copy_dma = &r600_copy_blit,
// .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
};
 
static struct radeon_asic rs780_asic = {
.init = &r600_init,
// .fini = &r600_fini,
// .suspend = &r600_suspend,
// .resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
// .copy_dma = &r600_copy_blit,
// .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
};
 
static struct radeon_asic rv770_asic = {
.init = &rv770_init,
// .fini = &rv770_fini,
// .suspend = &rv770_suspend,
// .resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &r600_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
// .copy_dma = &r600_copy_blit,
// .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
};
#if 0
static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = NULL,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = NULL,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
// .copy_dma = &r600_copy_blit,
// .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
};
#endif
 
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
case CHIP_RS100:
case CHIP_RV200:
case CHIP_RS200:
rdev->asic = &r100_asic;
break;
case CHIP_R200:
case CHIP_RV250:
case CHIP_RS300:
case CHIP_RV280:
rdev->asic = &r200_asic;
break;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
case CHIP_RV380:
if (rdev->flags & RADEON_IS_PCIE)
rdev->asic = &r300_asic_pcie;
else
rdev->asic = &r300_asic;
break;
case CHIP_R420:
case CHIP_R423:
case CHIP_RV410:
rdev->asic = &r420_asic;
/* handle Macs: boards with no usable BIOS fall back to the legacy clock accessors */
if (rdev->bios == NULL) {
rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
rdev->asic->set_memory_clock = NULL;
}
break;
case CHIP_RS400:
case CHIP_RS480:
rdev->asic = &rs400_asic;
break;
case CHIP_RS600:
rdev->asic = &rs600_asic;
break;
case CHIP_RS690:
case CHIP_RS740:
rdev->asic = &rs690_asic;
break;
case CHIP_RV515:
rdev->asic = &rv515_asic;
break;
case CHIP_R520:
case CHIP_RV530:
case CHIP_RV560:
case CHIP_RV570:
case CHIP_R580:
rdev->asic = &r520_asic;
break;
case CHIP_R600:
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV620:
case CHIP_RV635:
case CHIP_RV670:
rdev->asic = &r600_asic;
break;
case CHIP_RS780:
case CHIP_RS880:
rdev->asic = &rs780_asic;
break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
rdev->asic = &rv770_asic;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
 
if (rdev->flags & RADEON_IS_IGP) {
rdev->asic->get_memory_clock = NULL;
rdev->asic->set_memory_clock = NULL;
}
 
/* set the number of crtcs */
if (rdev->flags & RADEON_SINGLE_CRTC)
rdev->num_crtc = 1;
else {
if (ASIC_IS_DCE41(rdev))
rdev->num_crtc = 2;
else if (ASIC_IS_DCE4(rdev))
rdev->num_crtc = 6;
else
rdev->num_crtc = 2;
}
 
return 0;
}
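 
/* Illustrative sketch, not part of this source: common code never calls
 * r600_init() and friends directly, it goes through the table selected
 * above.  Upstream radeon wraps each hook in a macro in radeon.h; the two
 * wrappers below are assumptions shown only to make the dispatch concrete.
 *
 *   #define radeon_init(rdev)      (rdev)->asic->init((rdev))
 *   #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
 *
 * With those, radeon_init(rdev) resolves to rv770_init() on an RV770,
 * r100_init() on an R100, and so on.
 */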
 
/drivers/video/drm/radeon/radeon_asic.h
45,14 → 45,21
/*
* r100,rv100,rs100,rv200,rs200
*/
extern int r100_init(struct radeon_device *rdev);
extern void r100_fini(struct radeon_device *rdev);
extern int r100_suspend(struct radeon_device *rdev);
extern int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
struct r100_mc_save {
u32 GENMO_WT;
u32 CRTC_EXT_CNTL;
u32 CRTC_GEN_CNTL;
u32 CRTC2_GEN_CNTL;
u32 CUR_OFFSET;
u32 CUR2_OFFSET;
};
int r100_init(struct radeon_device *rdev);
void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
int r100_gpu_reset(struct radeon_device *rdev);
bool r100_gpu_is_lockup(struct radeon_device *rdev);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
73,7 → 80,7
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
82,45 → 89,54
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
int r100_debugfs_rbbm_init(struct radeon_device *rdev);
int r100_debugfs_cp_init(struct radeon_device *rdev);
void r100_cp_disable(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
void r100_cp_fini(struct radeon_device *rdev);
int r100_pci_gart_init(struct radeon_device *rdev);
void r100_pci_gart_fini(struct radeon_device *rdev);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
struct radeon_cp *cp);
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
struct radeon_cp *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_init(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_vram_init_sizes(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
void r100_restore_sanity(struct radeon_device *rdev);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
struct radeon_bo *robj);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
radeon_packet0_check_t check);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx);
void r100_enable_bm(struct radeon_device *rdev);
void r100_set_common_regs(struct radeon_device *rdev);
void r100_bm_disable(struct radeon_device *rdev);
extern bool r100_gui_idle(struct radeon_device *rdev);
extern void r100_pm_misc(struct radeon_device *rdev);
extern void r100_pm_prepare(struct radeon_device *rdev);
extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 
static struct radeon_asic r100_asic = {
.init = &r100_init,
// .fini = &r100_fini,
// .suspend = &r100_suspend,
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &r100_gpu_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
// .cs_parse = &r100_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = NULL,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
/*
* r200,rv250,rs300,rv280
*/
129,44 → 145,8
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
static struct radeon_asic r200_asic = {
.init = &r100_init,
// .fini = &r100_fini,
// .suspend = &r100_suspend,
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &r100_gpu_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
// .cs_parse = &r100_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = NULL,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
void r200_set_safe_registers(struct radeon_device *rdev);
 
 
/*
* r300,r350,rv350,rv380
*/
174,7 → 154,8
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern int r300_gpu_reset(struct radeon_device *rdev);
extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
181,87 → 162,18
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern void r300_mc_program(struct radeon_device *rdev);
extern void r300_mc_init(struct radeon_device *rdev);
extern void r300_clock_startup(struct radeon_device *rdev);
extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
extern int rv370_pcie_gart_init(struct radeon_device *rdev);
extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
 
static struct radeon_asic r300_asic = {
.init = &r300_init,
// .fini = &r300_fini,
// .suspend = &r300_suspend,
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
 
static struct radeon_asic r300_asic_pcie = {
.init = &r300_init,
// .fini = &r300_fini,
// .suspend = &r300_suspend,
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
/*
* r420,r423,rv410
*/
269,45 → 181,12
extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
static struct radeon_asic r420_asic = {
.init = &r420_init,
// .fini = &r420_fini,
// .suspend = &r420_suspend,
// .resume = &r420_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
extern void r420_pm_init_profile(struct radeon_device *rdev);
extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
extern void r420_pipes_init(struct radeon_device *rdev);
 
 
/*
* rs400,rs480
*/
319,48 → 198,16
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
static struct radeon_asic rs400_asic = {
.init = &rs400_init,
// .fini = &rs400_fini,
// .suspend = &rs400_suspend,
// .resume = &rs400_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &r100_irq_set,
// .irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
int rs400_gart_init(struct radeon_device *rdev);
int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
 
 
/*
* rs600.
*/
extern int rs600_asic_reset(struct radeon_device *rdev);
extern int rs600_init(struct radeon_device *rdev);
extern void rs600_fini(struct radeon_device *rdev);
extern int rs600_suspend(struct radeon_device *rdev);
367,6 → 214,7
extern int rs600_resume(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
378,46 → 226,15
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void rs600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev);
extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
 
static struct radeon_asic rs600_asic = {
.init = &rs600_init,
// .fini = &rs600_fini,
// .suspend = &rs600_suspend,
// .resume = &rs600_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &rs600_irq_set,
// .irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs600_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
 
/*
* rs690,rs740
*/
428,96 → 245,36
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs690_asic = {
.init = &rs690_init,
// .fini = &rs690_fini,
// .suspend = &rs690_suspend,
// .resume = &rs690_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &rs600_irq_set,
// .irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r300_copy_dma,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2);
 
 
/*
* rv515
*/
struct rv515_mc_save {
u32 d1vga_control;
u32 d2vga_control;
u32 vga_render_control;
u32 vga_hdp_control;
u32 d1crtc_control;
u32 d2crtc_control;
};
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
int rv515_gpu_reset(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
static struct radeon_asic rv515_asic = {
.init = &rv515_init,
// .fini = &rv515_fini,
// .suspend = &rv515_suspend,
// .resume = &rv515_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &rs600_irq_set,
// .irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
void rv515_vga_render_disable(struct radeon_device *rdev);
void rv515_set_safe_registers(struct radeon_device *rdev);
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
void rv515_clock_startup(struct radeon_device *rdev);
void rv515_debugfs(struct radeon_device *rdev);
 
 
/*
525,43 → 282,6
*/
int r520_init(struct radeon_device *rdev);
int r520_resume(struct radeon_device *rdev);
static struct radeon_asic r520_asic = {
.init = &r520_init,
// .fini = &rv515_fini,
// .suspend = &rv515_suspend,
// .resume = &r520_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
// .ring_ib_execute = &r100_ring_ib_execute,
// .irq_set = &rs600_irq_set,
// .irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
// .copy_blit = &r100_copy_blit,
// .copy_dma = &r300_copy_dma,
// .copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
/*
* r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
580,18 → 300,13
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
int r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
int r600_gpu_reset(struct radeon_device *rdev);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
int r600_ib_test(struct radeon_device *rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
603,43 → 318,58
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
extern void rs780_pm_init_profile(struct radeon_device *rdev);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
bool r600_card_posted(struct radeon_device *rdev);
void r600_cp_stop(struct radeon_device *rdev);
int r600_cp_start(struct radeon_device *rdev);
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
int r600_cp_resume(struct radeon_device *rdev);
void r600_cp_fini(struct radeon_device *rdev);
int r600_count_pipe_bits(uint32_t val);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
int r600_pcie_gart_init(struct radeon_device *rdev);
void r600_scratch_init(struct radeon_device *rdev);
int r600_blit_init(struct radeon_device *rdev);
void r600_blit_fini(struct radeon_device *rdev);
int r600_init_microcode(struct radeon_device *rdev);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_init(struct radeon_device *rdev);
void r600_irq_fini(struct radeon_device *rdev);
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
int r600_irq_set(struct radeon_device *rdev);
void r600_irq_suspend(struct radeon_device *rdev);
void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
int r600_audio_tmds_index(struct drm_encoder *encoder);
void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
int r600_audio_channels(struct radeon_device *rdev);
int r600_audio_bits_per_sample(struct radeon_device *rdev);
int r600_audio_rate(struct radeon_device *rdev);
uint8_t r600_audio_status_bits(struct radeon_device *rdev);
uint8_t r600_audio_category_code(struct radeon_device *rdev);
void r600_audio_schedule_polling(struct radeon_device *rdev);
void r600_audio_enable_polling(struct drm_encoder *encoder);
void r600_audio_disable_polling(struct drm_encoder *encoder);
void r600_audio_fini(struct radeon_device *rdev);
void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes);
 
static struct radeon_asic r600_asic = {
.init = &r600_init,
// .fini = &r600_fini,
// .suspend = &r600_suspend,
// .resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.gpu_reset = &r600_gpu_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
// .copy_dma = &r600_copy_blit,
// .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
};
 
/*
* rv770,rv730,rv710,rv740
*/
647,90 → 377,67
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
int rv770_gpu_reset(struct radeon_device *rdev);
void rv770_pm_misc(struct radeon_device *rdev);
u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
 
static struct radeon_asic rv770_asic = {
.init = &rv770_init,
// .fini = &rv770_fini,
// .suspend = &rv770_suspend,
// .resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
.gpu_reset = &rv770_gpu_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
// .copy_dma = &r600_copy_blit,
// .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
};
 
/*
* evergreen
*/
struct evergreen_mc_save {
u32 vga_control[6];
u32 vga_render_control;
u32 vga_hdp_control;
u32 crtc_control[6];
};
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
int evergreen_gpu_reset(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
int evergreen_irq_set(struct radeon_device *rdev);
int evergreen_irq_process(struct radeon_device *rdev);
extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
void evergreen_blit_fini(struct radeon_device *rdev);
/* evergreen blit */
int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void evergreen_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes);
 
static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = NULL,
.gpu_reset = &evergreen_gpu_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = NULL,
// .ring_ib_execute = &r600_ring_ib_execute,
// .irq_set = &r600_irq_set,
// .irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
// .copy_blit = &r600_copy_blit,
// .copy_dma = &r600_copy_blit,
// .copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
};
/*
* cayman
*/
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
bool cayman_gpu_is_lockup(struct radeon_device *rdev);
int cayman_asic_reset(struct radeon_device *rdev);
 
#endif
/drivers/video/drm/radeon/radeon_atombios.c
32,12 → 32,12
 
/* from radeon_encoder.c */
extern uint32_t
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
uint32_t supported_device);
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device, u16 caps);
 
/* from radeon_connector.c */
extern void
46,13 → 46,14
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb, uint32_t igp_lane_info,
uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd);
struct radeon_hpd *hpd,
struct radeon_router *router);
 
/* from radeon_legacy_encoder.c */
extern void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
 
union atom_supported_devices {
69,20 → 70,42
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset;
int i;
uint16_t data_offset, size;
int i, num_indices;
 
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
 
atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
 
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
 
for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
 
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
gpio->ucDataEnShift = 8;
gpio->ucDataY_Shift = 8;
gpio->ucDataA_Shift = 8;
}
}
 
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
 
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
113,14 → 136,98
 
i2c.i2c_id = gpio->sucI2cId.ucAccess;
 
if (i2c.mask_clk_reg)
i2c.valid = true;
break;
}
}
}
 
return i2c;
}
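 
/* Sketch of a typical caller (assumed, not from this source): the record
 * must be checked before an adapter is created, since i2c.valid stays
 * false when the requested id is absent or the clock-mask register is 0:
 *
 *   struct radeon_i2c_bus_rec bus = radeon_lookup_i2c_gpio(rdev, id);
 *   if (bus.valid)
 *       radeon_i2c_create(rdev->ddev, &bus, "DDC");
 */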
 
void radeon_atombios_i2c_init(struct radeon_device *rdev)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
ATOM_GPIO_I2C_ASSIGMENT *gpio;
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset, size;
int i, num_indices;
char stmp[32];
 
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
 
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
i2c.valid = false;
 
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
gpio->ucDataEnShift = 8;
gpio->ucDataY_Shift = 8;
gpio->ucDataA_Shift = 8;
}
}
 
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
 
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
i2c.en_data_mask = (1 << gpio->ucDataEnShift);
i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
 
if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
i2c.hw_capable = true;
else
i2c.hw_capable = false;
 
if (gpio->sucI2cId.ucAccess == 0xa0)
i2c.mm_i2c = true;
else
i2c.mm_i2c = false;
 
i2c.i2c_id = gpio->sucI2cId.ucAccess;
 
if (i2c.mask_clk_reg) {
i2c.valid = true;
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
}
}
}
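 
/* Note on the "* 4" scaling above (inferred from the surrounding code):
 * the ATOM GPIO table stores register locations as 32-bit dword indices,
 * while the driver's MMIO accessors take byte offsets, so e.g.
 *
 *   i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
 *
 * turns a dword index into the byte offset expected by RREG32()/WREG32().
 */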
 
static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
u8 id)
{
135,22 → 242,23
memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
gpio.valid = false;
 
atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
 
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
 
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
 
for (i = 0; i < num_indices; i++) {
pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = pin->usGpioPin_AIndex * 4;
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
}
}
}
 
return gpio;
}
161,6 → 269,8
struct radeon_hpd hpd;
u32 reg;
 
memset(&hpd, 0, sizeof(struct radeon_hpd));
 
if (ASIC_IS_DCE4(rdev))
reg = EVERGREEN_DC_GPIO_HPD_A;
else
222,6 → 332,15
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
 
/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
if ((dev->pdev->device == 0x796e) &&
(dev->pdev->subsystem_vendor == 0x1462) &&
(dev->pdev->subsystem_device == 0x7302)) {
if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
return false;
}
 
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x147b) &&
264,8 → 383,17
if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return false;
if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
*line_mux = 0x90;
}
 
/* mac rv630, rv730, others */
if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
(*connector_type == DRM_MODE_CONNECTOR_DVII)) {
*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
}
 
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x9598) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
275,6 → 403,15
}
}
 
/* ASUS HD 3600 board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x9598) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
(dev->pdev->subsystem_device == 0x01e4)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
}
 
/* ASUS HD 3450 board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x95C5) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
294,14 → 431,25
}
}
 
/* Acer laptop reports DVI-D as DVI-I */
if ((dev->pdev->device == 0x95c4) &&
/* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port
* on the laptop and a DVI port on the docking station and
* both share the same encoder, hpd pin, and ddc line.
* So while the bios table is technically correct,
* we drop the DVI port here since xrandr has no concept of
* encoders and will try to drive both connectors
* with different crtcs, which isn't possible on the hardware
* side and leaves no crtcs for LVDS or VGA.
*/
if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
(dev->pdev->subsystem_vendor == 0x1025) &&
(dev->pdev->subsystem_device == 0x013c)) {
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
(supported_device == ATOM_DEVICE_DFP1_SUPPORT))
(supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
/* actually it's a DVI-D port not DVI-I */
*connector_type = DRM_MODE_CONNECTOR_DVID;
return false;
}
}
 
/* XFX Pine Group device rv730 reports no VGA DDC lines
* even though they are wired up to record 0x93
385,19 → 533,19
u16 size, data_offset;
u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
ATOM_ENCODER_OBJECT_TABLE *enc_obj;
ATOM_OBJECT_TABLE *router_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
int i, j, path_size, device_support;
int i, j, k, path_size, device_support;
int connector_type;
u16 igp_lane_info, conn_id, connector_object_id;
bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
struct radeon_router router;
struct radeon_gpio_rec gpio;
struct radeon_hpd hpd;
 
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
 
if (data_offset == 0)
if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
return false;
 
if (crev < 2)
410,6 → 558,12
con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usConnectorObjectTableOffset));
enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usEncoderObjectTableOffset));
router_obj = (ATOM_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usRouterObjectTableOffset));
device_support = le16_to_cpu(obj_header->usDeviceSupport);
 
path_size = 0;
419,7 → 573,7
addr += path_size;
path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
path_size += le16_to_cpu(path->usSize);
linkb = false;
 
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
 
449,8 → 603,8
GetIndexIntoMasterTable(DATA,
IntegratedSystemInfo);
 
atom_parse_data_header(ctx, index, &size, &frev,
&crev, &igp_offset);
if (atom_parse_data_header(ctx, index, &size, &frev,
&crev, &igp_offset)) {
 
if (crev >= 2) {
igp_obj =
486,41 → 640,132
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
}
} else {
igp_lane_info = 0;
connector_type =
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
}
 
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
continue;
 
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2);
j++) {
uint8_t enc_obj_id, enc_obj_num, enc_obj_type;
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
 
enc_obj_id =
grph_obj_id =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
enc_obj_num =
grph_obj_num =
(le16_to_cpu(path->usGraphicObjIds[j]) &
ENUM_ID_MASK) >> ENUM_ID_SHIFT;
enc_obj_type =
grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
/* FIXME: add support for router objects */
if (enc_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
if (enc_obj_num == 2)
linkb = true;
else
linkb = false;
if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
(ctx->bios + data_offset +
le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
ATOM_ENCODER_CAP_RECORD *cap_record;
u16 caps = 0;
 
while (record->ucRecordSize > 0 &&
record->ucRecordType > 0 &&
record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
switch (record->ucRecordType) {
case ATOM_ENCODER_CAP_RECORD_TYPE:
cap_record = (ATOM_ENCODER_CAP_RECORD *)
record;
caps = le16_to_cpu(cap_record->usEncoderCap);
break;
}
record = (ATOM_COMMON_RECORD_HEADER *)
((char *)record + record->ucRecordSize);
}
radeon_add_atom_encoder(dev,
enc_obj_id,
encoder_obj,
le16_to_cpu
(path->
usDeviceTag));
usDeviceTag),
caps);
}
}
} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
(ctx->bios + data_offset +
le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
ATOM_I2C_RECORD *i2c_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
(ctx->bios + data_offset +
le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
int enum_id;
 
router.router_id = router_obj_id;
for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
enum_id++) {
if (le16_to_cpu(path->usConnObjectId) ==
le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
break;
}
 
while (record->ucRecordSize > 0 &&
record->ucRecordType > 0 &&
record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
switch (record->ucRecordType) {
case ATOM_I2C_RECORD_TYPE:
i2c_record =
(ATOM_I2C_RECORD *)
record;
i2c_config =
(ATOM_I2C_ID_CONFIG_ACCESS *)
&i2c_record->sucI2cId;
router.i2c_info =
radeon_lookup_i2c_gpio(rdev,
i2c_config->
ucAccess);
router.i2c_addr = i2c_record->ucI2CAddr >> 1;
break;
case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
record;
router.ddc_valid = true;
router.ddc_mux_type = ddc_path->ucMuxType;
router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
break;
case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
record;
router.cd_valid = true;
router.cd_mux_type = cd_path->ucMuxType;
router.cd_mux_control_pin = cd_path->ucMuxControlPin;
router.cd_mux_state = cd_path->ucMuxState[enum_id];
break;
}
record = (ATOM_COMMON_RECORD_HEADER *)
((char *)record + record->ucRecordSize);
}
}
}
}
}
 
/* look up gpio for ddc, hpd */
ddc_bus.valid = false;
hpd.hpd = RADEON_HPD_NONE;
if ((le16_to_cpu(path->usDeviceTag) &
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
538,12 → 783,10
ATOM_I2C_RECORD *i2c_record;
ATOM_HPD_INT_RECORD *hpd_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
hpd.hpd = RADEON_HPD_NONE;
 
while (record->ucRecordType > 0
&& record->
ucRecordType <=
ATOM_MAX_OBJECT_RECORD_NUMBER) {
while (record->ucRecordSize > 0 &&
record->ucRecordType > 0 &&
record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
switch (record->ucRecordType) {
case ATOM_I2C_RECORD_TYPE:
i2c_record =
576,13 → 819,10
break;
}
}
} else {
hpd.hpd = RADEON_HPD_NONE;
ddc_bus.valid = false;
}
 
/* needed for aux chan transactions */
ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
ddc_bus.hpd = hpd.hpd;
 
conn_id = le16_to_cpu(path->usConnObjectId);
 
596,9 → 836,10
le16_to_cpu(path->
usDeviceTag),
connector_type, &ddc_bus,
linkb, igp_lane_info,
igp_lane_info,
connector_object_id,
&hpd);
&hpd,
&router);
 
}
}
627,7 → 868,7
uint8_t frev, crev;
ATOM_XTMDS_INFO *xtmds;
 
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
 
if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
641,6 → 882,9
else
return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
}
} else
return supported_devices_connector_object_id_convert
[connector_type];
} else {
return supported_devices_connector_object_id_convert
[connector_type];
670,10 → 914,23
uint8_t dac;
union atom_supported_devices *supported_devices;
int i, j, max_device;
struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
struct bios_connector *bios_connectors;
size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
struct radeon_router router;
 
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
router.ddc_valid = false;
router.cd_valid = false;
 
bios_connectors = kzalloc(bc_size, GFP_KERNEL);
if (!bios_connectors)
return false;
 
if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
&data_offset)) {
kfree(bios_connectors);
return false;
}
 
supported_devices =
(union atom_supported_devices *)(ctx->bios + data_offset);
 
695,7 → 952,7
}
 
if (i == ATOM_DEVICE_CV_INDEX) {
DRM_DEBUG("Skipping Component Video\n");
DRM_DEBUG_KMS("Skipping Component Video\n");
continue;
}
 
769,13 → 1026,14
 
if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
radeon_add_atom_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
(1 << i),
dac),
(1 << i));
(1 << i),
0);
else
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
(1 << i),
dac),
(1 << i));
832,14 → 1090,16
bios_connectors[i].
connector_type,
&bios_connectors[i].ddc_bus,
false, 0,
0,
connector_object_id,
&bios_connectors[i].hpd);
&bios_connectors[i].hpd,
&router);
}
}
 
radeon_link_encoder_connector(dev);
 
kfree(bios_connectors);
return true;
}
 
849,6 → 1109,7
ATOM_FIRMWARE_INFO_V1_3 info_13;
ATOM_FIRMWARE_INFO_V1_4 info_14;
ATOM_FIRMWARE_INFO_V2_1 info_21;
ATOM_FIRMWARE_INFO_V2_2 info_22;
};
 
bool radeon_atom_get_clock_info(struct drm_device *dev)
865,14 → 1126,11
struct radeon_pll *mpll = &rdev->clock.mpll;
uint16_t data_offset;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);
 
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
 
if (firmware_info) {
/* pixel clocks */
p1pll->reference_freq =
le16_to_cpu(firmware_info->info.usReferenceClock);
887,22 → 1145,25
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
 
if (crev >= 4) {
p1pll->lcd_pll_out_min =
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_min == 0)
p1pll->lcd_pll_out_min = p1pll->pll_out_min;
p1pll->lcd_pll_out_max =
le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_max == 0)
p1pll->lcd_pll_out_max = p1pll->pll_out_max;
} else {
p1pll->lcd_pll_out_min = p1pll->pll_out_min;
p1pll->lcd_pll_out_max = p1pll->pll_out_max;
}
 
if (p1pll->pll_out_min == 0) {
if (ASIC_IS_AVIVO(rdev))
p1pll->pll_out_min = 64800;
else
p1pll->pll_out_min = 20000;
} else if (p1pll->pll_out_min > 64800) {
/* Limiting the pll output range is a good thing generally as
* it limits the number of possible pll combinations for a given
* frequency presumably to the ones that work best on each card.
* However, certain dual-link DVI monitors seem to prefer
* pll combinations that would be limited by this, at least on
* pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
* family.
*/
if (!radeon_new_pll)
p1pll->pll_out_min = 64800;
}
 
p1pll->pll_in_min =
913,7 → 1174,11
*p2pll = *p1pll;
 
/* system clock */
if (ASIC_IS_DCE4(rdev))
spll->reference_freq =
le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
else
spll->reference_freq =
le16_to_cpu(firmware_info->info.usReferenceClock);
spll->reference_div = 0;
 
936,7 → 1201,11
le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
 
/* memory clock */
if (ASIC_IS_DCE4(rdev))
mpll->reference_freq =
le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
else
mpll->reference_freq =
le16_to_cpu(firmware_info->info.usReferenceClock);
mpll->reference_div = 0;
 
966,13 → 1235,21
if (ASIC_IS_DCE4(rdev)) {
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
if (rdev->clock.default_dispclk == 0)
if (rdev->clock.default_dispclk == 0) {
if (ASIC_IS_DCE5(rdev))
rdev->clock.default_dispclk = 54000; /* 540 MHz */
else
rdev->clock.default_dispclk = 60000; /* 600 MHz */
}
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
}
*dcpll = *p1pll;
 
rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
if (rdev->clock.max_pixel_clock == 0)
rdev->clock.max_pixel_clock = 40000;
 
return true;
}
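 
For reference, a minimal standalone sketch of the pll_out_min policy above, with ASIC_IS_AVIVO and the radeon_new_pll module option reduced to plain parameters (radeon clocks are in 10 kHz units, so 64800 is 648 MHz):
 
static unsigned int clamp_pll_out_min(unsigned int out_min,
                                      int is_avivo, int new_pll)
{
        if (out_min == 0)
                return is_avivo ? 64800 : 20000;
        /* cap an overly high BIOS minimum unless the new pll algo is in use */
        if (out_min > 64800 && !new_pll)
                return 64800;
        return out_min;
}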
 
992,20 → 1269,21
u8 frev, crev;
u16 data_offset;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);
/* sideport is AMD only */
if (rdev->family == CHIP_RS600)
return false;
 
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
igp_info = (union igp_info *)(mode_info->atom_context->bios +
data_offset);
 
if (igp_info) {
switch (crev) {
case 1:
if (igp_info->info.ucMemoryType & 0xf0)
if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
return true;
break;
case 2:
if (igp_info->info_2.ucMemoryType & 0x0f)
if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
return true;
break;
default:
1029,14 → 1307,12
uint16_t maxfreq;
int i;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);
 
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
tmds_info =
(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
data_offset);
 
if (tmds_info) {
maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
for (i = 0; i < 4; i++) {
tmds->tmds_pll[i].freq =
1053,7 → 1329,7
(tmds_info->asMiscInfo[i].
ucPLL_VoltageSwing & 0xf) << 16;
 
DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n",
DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
 
1067,38 → 1343,27
return false;
}
 
static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
radeon_encoder
*encoder,
bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id)
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
uint16_t data_offset;
uint16_t data_offset, size;
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
uint8_t frev, crev;
struct radeon_atom_ss *ss = NULL;
int i;
int i, num_indices;
 
if (id > ATOM_MAX_SS_ENTRY)
return NULL;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);
 
memset(ss, 0, sizeof(struct radeon_atom_ss));
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
ss_info =
(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
 
if (ss_info) {
ss =
kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
 
if (!ss)
return NULL;
 
for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
for (i = 0; i < num_indices; i++) {
if (ss_info->asSS_Info[i].ucSS_Id == id) {
ss->percentage =
le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
1107,36 → 1372,128
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
break;
return true;
}
}
}
return ss;
return false;
}
 
static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
struct radeon_encoder_atom_dig *lvds)
static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
u16 data_offset, size;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
u8 frev, crev;
u16 percentage = 0, rate = 0;
 
/* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1179) &&
(dev->pdev->subsystem_device == 0xff50)) {
if ((lvds->native_mode.hdisplay == 1280) &&
(lvds->native_mode.vdisplay == 800))
lvds->pll_algo = PLL_ALGO_LEGACY;
/* get any igp specific overrides */
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
(mode_info->atom_context->bios + data_offset);
switch (id) {
case ASIC_INTERNAL_SS_ON_TMDS:
percentage = le16_to_cpu(igp_info->usDVISSPercentage);
rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_HDMI:
percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_LVDS:
percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
break;
}
if (percentage)
ss->percentage = percentage;
if (rate)
ss->rate = rate;
}
}
 
/* Dell Studio 15 laptop panel doesn't like new pll divider algo */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1028) &&
(dev->pdev->subsystem_device == 0x029f)) {
if ((lvds->native_mode.hdisplay == 1280) &&
(lvds->native_mode.vdisplay == 800))
lvds->pll_algo = PLL_ALGO_LEGACY;
union asic_ss_info {
struct _ATOM_ASIC_INTERNAL_SS_INFO info;
struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};
 
bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
uint16_t data_offset, size;
union asic_ss_info *ss_info;
uint8_t frev, crev;
int i, num_indices;
 
memset(ss, 0, sizeof(struct radeon_atom_ss));
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
 
ss_info =
(union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
 
switch (frev) {
case 1:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT);
 
for (i = 0; i < num_indices; i++) {
if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
(clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
return true;
}
}
break;
case 2:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
for (i = 0; i < num_indices; i++) {
if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
(clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
return true;
}
}
break;
case 3:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
for (i = 0; i < num_indices; i++) {
if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
(clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
if (rdev->flags & RADEON_IS_IGP)
radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
return true;
}
}
break;
default:
DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
break;
}
 
}
return false;
}
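 
The entry count is now derived from the table size returned in the data header instead of the fixed ATOM_MAX_SS_ENTRY bound; a self-contained sketch of that sizing, with the struct standing in for ATOM_COMMON_TABLE_HEADER:
 
#include <stddef.h>
 
struct common_table_header {            /* stand-in, not the ATOM definition */
        unsigned short structure_size;
        unsigned char format_rev;
        unsigned char content_rev;
};
 
static int table_entry_count(size_t table_size, size_t entry_size)
{
        /* fixed header first, then entries packed back-to-back */
        return (int)((table_size - sizeof(struct common_table_header)) / entry_size);
}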
 
union lvds_info {
struct _ATOM_LVDS_INFO info;
1155,14 → 1512,12
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);
 
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
lvds_info =
(union lvds_info *)(mode_info->atom_context->bios + data_offset);
 
if (lvds_info) {
lvds =
kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
 
1184,12 → 1539,12
lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
 
misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
if (misc & ATOM_VSYNC_POLARITY)
1203,28 → 1558,84
if (misc & ATOM_DOUBLE_CLOCK_MODE)
lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
 
lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
 
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
 
lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
 
if (ASIC_IS_AVIVO(rdev)) {
if (radeon_new_pll == 0)
lvds->pll_algo = PLL_ALGO_LEGACY;
encoder->native_mode = lvds->native_mode;
 
if (encoder_enum == 2)
lvds->linkb = true;
else
lvds->pll_algo = PLL_ALGO_NEW;
} else {
if (radeon_new_pll == 1)
lvds->pll_algo = PLL_ALGO_NEW;
lvds->linkb = false;
 
/* parse the lcd record table */
if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
bool bad_record = false;
u8 *record;
 
if ((frev == 1) && (crev < 2))
/* absolute */
record = (u8 *)(mode_info->atom_context->bios +
le16_to_cpu(lvds_info->info.usModePatchTableOffset));
else
lvds->pll_algo = PLL_ALGO_LEGACY;
}
/* relative */
record = (u8 *)(mode_info->atom_context->bios +
data_offset +
le16_to_cpu(lvds_info->info.usModePatchTableOffset));
while (*record != ATOM_RECORD_END_TYPE) {
switch (*record) {
case LCD_MODE_PATCH_RECORD_MODE_TYPE:
record += sizeof(ATOM_PATCH_RECORD_MODE);
break;
case LCD_RTS_RECORD_TYPE:
record += sizeof(ATOM_LCD_RTS_RECORD);
break;
case LCD_CAP_RECORD_TYPE:
record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
break;
case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
struct edid *edid;
int edid_size =
max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
edid = kmalloc(edid_size, GFP_KERNEL);
if (edid) {
memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
fake_edid_record->ucFakeEDIDLength);
 
/* LVDS quirks */
radeon_atom_apply_lvds_quirks(dev, lvds);
 
encoder->native_mode = lvds->native_mode;
if (drm_edid_is_valid(edid)) {
rdev->mode_info.bios_hardcoded_edid = edid;
rdev->mode_info.bios_hardcoded_edid_size = edid_size;
} else
kfree(edid);
}
}
record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
lvds->native_mode.width_mm = panel_res_record->usHSize;
lvds->native_mode.height_mm = panel_res_record->usVSize;
record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
break;
default:
DRM_ERROR("Bad LCD record %d\n", *record);
bad_record = true;
break;
}
if (bad_record)
break;
}
}
}
return lvds;
}
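 
The LCD patch-record loop above steps through variable-size records, advancing by each record type's struct size until it reaches ATOM_RECORD_END_TYPE and bailing out on anything unknown; a self-contained sketch of that walk, with record types and struct layouts as illustrative stand-ins:
 
#include <stdio.h>
 
#define REC_END         0xff            /* stand-in for ATOM_RECORD_END_TYPE */
#define REC_MODE        1
#define REC_FAKE_EDID   2
 
struct mode_rec { unsigned char type; unsigned char data[3]; };
struct edid_rec { unsigned char type; unsigned char len; };
 
static void walk_records(const unsigned char *record)
{
        while (*record != REC_END) {
                switch (*record) {
                case REC_MODE:
                        record += sizeof(struct mode_rec);
                        break;
                case REC_FAKE_EDID:
                        record += sizeof(struct edid_rec);
                        break;
                default:
                        /* unknown record: stop walking, as the driver does */
                        printf("Bad LCD record %d\n", *record);
                        return;
                }
        }
}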
 
1241,11 → 1652,11
uint8_t bg, dac;
struct radeon_encoder_primary_dac *p_dac = NULL;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
dac_info = (struct _COMPASSIONATE_DATA *)
(mode_info->atom_context->bios + data_offset);
 
dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
 
if (dac_info) {
p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
 
if (!p_dac)
1270,12 → 1681,14
u8 frev, crev;
u16 data_offset, misc;
 
atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset);
if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
&frev, &crev, &data_offset))
return false;
 
switch (crev) {
case 1:
tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
if (index > MAX_SUPPORTED_TV_TIMING)
if (index >= MAX_SUPPORTED_TV_TIMING)
return false;
 
mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
1313,7 → 1726,7
break;
case 2:
tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
if (index > MAX_SUPPORTED_TV_TIMING_V1_2)
if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
return false;
 
dtd_timings = &tv_info_v1_2->aModeTimings[index];
1362,48 → 1775,51
struct _ATOM_ANALOG_TV_INFO *tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
 
tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
tv_info = (struct _ATOM_ANALOG_TV_INFO *)
(mode_info->atom_context->bios + data_offset);
 
switch (tv_info->ucTV_BootUpDefaultStandard) {
case ATOM_TV_NTSC:
tv_std = TV_STD_NTSC;
DRM_INFO("Default TV standard: NTSC\n");
DRM_DEBUG_KMS("Default TV standard: NTSC\n");
break;
case ATOM_TV_NTSCJ:
tv_std = TV_STD_NTSC_J;
DRM_INFO("Default TV standard: NTSC-J\n");
DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
break;
case ATOM_TV_PAL:
tv_std = TV_STD_PAL;
DRM_INFO("Default TV standard: PAL\n");
DRM_DEBUG_KMS("Default TV standard: PAL\n");
break;
case ATOM_TV_PALM:
tv_std = TV_STD_PAL_M;
DRM_INFO("Default TV standard: PAL-M\n");
DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
break;
case ATOM_TV_PALN:
tv_std = TV_STD_PAL_N;
DRM_INFO("Default TV standard: PAL-N\n");
DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
break;
case ATOM_TV_PALCN:
tv_std = TV_STD_PAL_CN;
DRM_INFO("Default TV standard: PAL-CN\n");
DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
break;
case ATOM_TV_PAL60:
tv_std = TV_STD_PAL_60;
DRM_INFO("Default TV standard: PAL-60\n");
DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
break;
case ATOM_TV_SECAM:
tv_std = TV_STD_SECAM;
DRM_INFO("Default TV standard: SECAM\n");
DRM_DEBUG_KMS("Default TV standard: SECAM\n");
break;
default:
tv_std = TV_STD_NTSC;
DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
break;
}
}
return tv_std;
}
 
1420,11 → 1836,12
uint8_t bg, dac;
struct radeon_encoder_tv_dac *tv_dac = NULL;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
 
dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
dac_info = (struct _COMPASSIONATE_DATA *)
(mode_info->atom_context->bios + data_offset);
 
if (dac_info) {
tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
 
if (!tv_dac)
1447,37 → 1864,137
return tv_dac;
}
 
static const char *thermal_controller_names[] = {
"NONE",
"lm63",
"adm1032",
"adm1030",
"max6649",
"lm64",
"f75375",
"asc7xxx",
};
 
static const char *pp_lib_thermal_controller_names[] = {
"NONE",
"lm63",
"adm1032",
"adm1030",
"max6649",
"lm64",
"f75375",
"RV6xx",
"RV770",
"adt7473",
"NONE",
"External GPIO",
"Evergreen",
"emc2103",
"Sumo",
"Northern Islands",
};
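 
Both name tables are indexed directly with the BIOS-supplied ucType, so a ucType at or past the end of the array would read out of bounds; a defensive lookup sketch (helper name is hypothetical):
 
static const char *controller_name(const char * const names[], unsigned count,
                                   unsigned char type)
{
        return (type < count) ? names[type] : "unknown";
}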
 
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
struct _ATOM_POWERPLAY_INFO_V3 info_3;
struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
 
void radeon_atombios_get_power_modes(struct radeon_device *rdev)
union pplib_clock_info {
struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};
 
union pplib_power_state {
struct _ATOM_PPLIB_STATE v1;
struct _ATOM_PPLIB_STATE_V2 v2;
};
 
static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
int state_index,
u32 misc, u32 misc2)
{
rdev->pm.power_state[state_index].misc = misc;
rdev->pm.power_state[state_index].misc2 = misc2;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_POWERSAVE;
if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
rdev->pm.power_state[state_index].flags &=
~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
}
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
} else if (state_index == 0) {
rdev->pm.power_state[state_index].clock_info[0].flags |=
RADEON_PM_MODE_NO_DISPLAY;
}
}
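 
A compact illustration of the "order matters" chain above: every matching flag overwrites the previous classification, so the last matching check wins. The flag values below are hypothetical, not the ATOM_PM_MISCINFO bits:
 
enum ps_type { PS_DEFAULT, PS_POWERSAVE, PS_BATTERY, PS_BALANCED, PS_PERF };
 
static enum ps_type classify(unsigned int misc)
{
        enum ps_type type = PS_DEFAULT;
 
        if (misc & 0x01)        /* checked first: lowest priority */
                type = PS_POWERSAVE;
        if (misc & 0x02)
                type = PS_BATTERY;
        if (misc & 0x04)
                type = PS_BALANCED;
        if (misc & 0x08)        /* checked last: wins over all of the above */
                type = PS_PERF;
        return type;
}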
 
static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
u32 misc, misc2 = 0;
int num_modes = 0, i;
int state_index = 0;
struct radeon_i2c_bus_rec i2c_bus;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
u32 misc, misc2 = 0, sclk, mclk;
union power_info *power_info;
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
struct _ATOM_PPLIB_STATE *power_state;
int num_modes = 0, i, j;
int state_index = 0, mode_index = 0;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
 
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
return state_index;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
rdev->pm.default_power_state = NULL;
 
if (power_info) {
if (frev < 4) {
/* add the i2c bus for thermal/fan chip */
if (power_info->info.ucOverdriveThermalController > 0) {
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
thermal_controller_names[power_info->info.ucOverdriveThermalController],
power_info->info.ucOverdriveControllerAddress >> 1);
i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = thermal_controller_names[power_info->info.
ucOverdriveThermalController];
info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
1491,16 → 2008,11
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
/* skip overclock modes for now */
if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk >
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
continue;
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
rdev->pm.power_state[state_index].pcie_lanes =
power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
(misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1518,29 → 2030,8
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
}
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_POWERSAVE;
if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
}
rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
state_index++;
break;
case 2:
1553,17 → 2044,12
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
/* skip overclock modes for now */
if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk >
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
continue;
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
rdev->pm.power_state[state_index].pcie_lanes =
power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
(misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1581,32 → 2067,8
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
}
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_POWERSAVE;
if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
}
rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
state_index++;
break;
case 3:
1619,17 → 2081,12
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
/* skip overclock modes for now */
if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk >
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
continue;
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
rdev->pm.power_state[state_index].pcie_lanes =
power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
(misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1653,142 → 2110,412
power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
}
}
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
state_index++;
break;
}
}
/* last mode is usually default */
if (rdev->pm.default_power_state_index == -1) {
rdev->pm.power_state[state_index - 1].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state_index = state_index - 1;
rdev->pm.power_state[state_index - 1].default_clock_mode =
&rdev->pm.power_state[state_index - 1].clock_info[0];
rdev->pm.power_state[state_index].flags &=
~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
rdev->pm.power_state[state_index].misc = 0;
rdev->pm.power_state[state_index].misc2 = 0;
}
return state_index;
}
 
static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
ATOM_PPLIB_THERMALCONTROLLER *controller)
{
struct radeon_i2c_bus_rec i2c_bus;
 
/* add the i2c bus for thermal/fan chip */
if (controller->ucType > 0) {
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
(controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
DRM_INFO("Special thermal controller config\n");
} else {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
}
}
 
static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
u16 *vddc, u16 *vddci)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
u8 frev, crev;
u16 data_offset;
union firmware_info *firmware_info;
 
*vddc = 0;
*vddci = 0;
 
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
if ((frev == 2) && (crev >= 2))
*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
}
}
 
static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
int state_index, int mode_index,
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
{
int j;
u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
u16 vddc, vddci;
 
radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
 
rdev->pm.power_state[state_index].misc = misc;
rdev->pm.power_state[state_index].misc2 = misc2;
rdev->pm.power_state[state_index].pcie_lanes =
((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_POWERSAVE;
if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
break;
case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
break;
case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
break;
case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
POWER_STATE_TYPE_PERFORMANCE;
break;
}
rdev->pm.power_state[state_index].flags = 0;
if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
rdev->pm.power_state[state_index].flags |=
RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
if (ASIC_IS_DCE5(rdev)) {
/* NI chips post without MC ucode, so default clocks are strobe mode only */
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
} else {
/* patch the table values with the default sclk/mclk from firmware info */
for (j = 0; j < mode_index; j++) {
rdev->pm.power_state[state_index].clock_info[j].mclk =
rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[j].sclk =
rdev->clock.default_sclk;
if (vddc)
rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
vddc;
}
state_index++;
break;
}
}
} else if (frev == 4) {
for (i = 0; i < power_info->info_4.ucNumStates; i++) {
mode_index = 0;
power_state = (struct _ATOM_PPLIB_STATE *)
(mode_info->atom_context->bios +
data_offset +
le16_to_cpu(power_info->info_4.usStateArrayOffset) +
i * power_info->info_4.ucStateEntrySize);
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
(mode_info->atom_context->bios +
data_offset +
le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
(power_state->ucNonClockStateIndex *
power_info->info_4.ucNonClockSize));
for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
}
 
static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
int state_index, int mode_index,
union pplib_clock_info *clock_info)
{
u32 sclk, mclk;
 
if (rdev->flags & RADEON_IS_IGP) {
struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
(struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
(mode_info->atom_context->bios +
data_offset +
le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
(power_state->ucClockStateIndices[j] *
power_info->info_4.ucClockInfoSize));
sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
sclk |= clock_info->ucLowEngineClockHigh << 16;
if (rdev->family >= CHIP_PALM) {
sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
sclk |= clock_info->sumo.ucEngineClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
continue;
/* skip overclock modes for now */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
continue;
} else {
sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
}
} else if (ASIC_IS_DCE4(rdev)) {
sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
clock_info->usVDDC;
mode_index++;
le16_to_cpu(clock_info->evergreen.usVDDC);
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
le16_to_cpu(clock_info->evergreen.usVDDCI);
} else {
struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
(struct _ATOM_PPLIB_R600_CLOCK_INFO *)
(mode_info->atom_context->bios +
data_offset +
le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
(power_state->ucClockStateIndices[j] *
power_info->info_4.ucClockInfoSize));
sclk = le16_to_cpu(clock_info->usEngineClockLow);
sclk |= clock_info->ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->usMemoryClockLow);
mclk |= clock_info->ucMemoryClockHigh << 16;
sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
sclk |= clock_info->r600.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
mclk |= clock_info->r600.ucMemoryClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
le16_to_cpu(clock_info->r600.usVDDC);
}
 
if (rdev->flags & RADEON_IS_IGP) {
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
return false;
} else {
/* skip invalid modes */
if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
continue;
/* skip overclock modes for now */
if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
(rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
continue;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
clock_info->usVDDC;
return false;
}
return true;
}
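 
ATOM splits each clock into a little-endian low u16 plus a high u8, which the code above reassembles into a 24-bit value in 10 kHz units; a standalone sketch (the u16 is assumed already converted to host order, as le16_to_cpu does above):
 
#include <stdint.h>
 
static uint32_t atom_clock_10khz(uint16_t low, uint8_t high)
{
        return (uint32_t)low | ((uint32_t)high << 16);
}
 
For example, usEngineClockLow = 0x5000 with ucEngineClockHigh = 0x01 yields 0x15000, i.e. 86016 * 10 kHz = 860.16 MHz.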
 
static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
union pplib_power_state *power_state;
int i, j;
int state_index = 0, mode_index = 0;
union pplib_clock_info *clock_info;
bool valid;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
 
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
return state_index;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
/* first mode is usually default, followed by low to high */
for (i = 0; i < power_info->pplib.ucNumStates; i++) {
mode_index = 0;
power_state = (union pplib_power_state *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usStateArrayOffset) +
i * power_info->pplib.ucStateEntrySize);
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
(power_state->v1.ucClockStateIndices[j] *
power_info->pplib.ucClockInfoSize));
valid = radeon_atombios_parse_pplib_clock_info(rdev,
state_index, mode_index,
clock_info);
if (valid)
mode_index++;
}
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
misc2 = le16_to_cpu(non_clock_info->usClassification);
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
break;
case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
break;
case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
break;
radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
non_clock_info);
state_index++;
}
if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
rdev->pm.power_state[state_index].type =
}
/* if multiple clock modes, mark the lowest as no display */
for (i = 0; i < state_index; i++) {
if (rdev->pm.power_state[i].num_clock_modes > 1)
rdev->pm.power_state[i].clock_info[0].flags |=
RADEON_PM_MODE_NO_DISPLAY;
}
/* first mode is usually default */
if (rdev->pm.default_power_state_index == -1) {
rdev->pm.power_state[0].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
rdev->pm.default_power_state_index = 0;
rdev->pm.power_state[0].default_clock_mode =
&rdev->pm.power_state[0].clock_info[0];
}
return state_index;
}
 
static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
union pplib_power_state *power_state;
int i, j, non_clock_array_index, clock_array_index;
int state_index = 0, mode_index = 0;
union pplib_clock_info *clock_info;
struct StateArray *state_array;
struct ClockInfoArray *clock_info_array;
struct NonClockInfoArray *non_clock_info_array;
bool valid;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
 
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
return state_index;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
state_array = (struct StateArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usStateArrayOffset));
clock_info_array = (struct ClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
non_clock_info_array = (struct NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
state_array->ucNumEntries, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
for (i = 0; i < state_array->ucNumEntries; i++) {
mode_index = 0;
power_state = (union pplib_power_state *)&state_array->states[i];
/* XXX this might be an inagua bug... */
non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = power_state->v2.clockInfoIndex[j];
/* XXX this might be an inagua bug... */
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index];
valid = radeon_atombios_parse_pplib_clock_info(rdev,
state_index, mode_index,
clock_info);
if (valid)
mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
non_clock_info);
state_index++;
}
}
/* if multiple clock modes, mark the lowest as no display */
for (i = 0; i < state_index; i++) {
if (rdev->pm.power_state[i].num_clock_modes > 1)
rdev->pm.power_state[i].clock_info[0].flags |=
RADEON_PM_MODE_NO_DISPLAY;
}
} else {
/* XXX figure out some good default low power mode for cards w/out power tables */
/* first mode is usually default */
if (rdev->pm.default_power_state_index == -1) {
rdev->pm.power_state[0].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state_index = 0;
rdev->pm.power_state[0].default_clock_mode =
&rdev->pm.power_state[0].clock_info[0];
}
return state_index;
}
 
if (rdev->pm.default_power_state == NULL) {
void radeon_atombios_get_power_modes(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
int state_index = 0;
 
rdev->pm.default_power_state_index = -1;
 
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
switch (frev) {
case 1:
case 2:
case 3:
state_index = radeon_atombios_parse_power_table_1_3(rdev);
break;
case 4:
case 5:
state_index = radeon_atombios_parse_power_table_4_5(rdev);
break;
case 6:
state_index = radeon_atombios_parse_power_table_6(rdev);
break;
default:
break;
}
} else {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
/* add the default mode */
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
1798,18 → 2525,18
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
if (rdev->asic->get_pcie_lanes)
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
else
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
rdev->pm.power_state[state_index].pcie_lanes = 16;
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].flags = 0;
state_index++;
}
}
 
rdev->pm.num_power_states = state_index;
 
rdev->pm.current_power_state = rdev->pm.default_power_state;
rdev->pm.current_clock_mode =
rdev->pm.default_power_state->default_clock_mode;
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
}
 
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
1828,7 → 2555,7
int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
return args.ulReturnEngineClock;
return le32_to_cpu(args.ulReturnEngineClock);
}
 
uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
1837,7 → 2564,7
int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
return args.ulReturnMemoryClock;
return le32_to_cpu(args.ulReturnMemoryClock);
}
 
void radeon_atom_set_engine_clock(struct radeon_device *rdev,
1846,7 → 2573,7
SET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
 
args.ulTargetEngineClock = eng_clock; /* 10 kHz */
args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 kHz */
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
1860,11 → 2587,47
if (rdev->flags & RADEON_IS_IGP)
return;
 
args.ulTargetMemoryClock = mem_clock; /* 10 kHz */
args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 kHz */
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
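 
The cpu_to_le32/le32_to_cpu wrappers added in these hunks matter because ATOM parameter blocks are little-endian regardless of host byte order; an equivalent portable read, assembling the bytes explicitly:
 
#include <stdint.h>
 
static uint32_t le32_read(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}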
 
union set_voltage {
struct _SET_VOLTAGE_PS_ALLOCATION alloc;
struct _SET_VOLTAGE_PARAMETERS v1;
struct _SET_VOLTAGE_PARAMETERS_V2 v2;
};
 
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
u8 frev, crev, volt_index = voltage_level;
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (crev) {
case 1:
args.v1.ucVoltageType = voltage_type;
args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
args.v1.ucVoltageIndex = volt_index;
break;
case 2:
args.v2.ucVoltageType = voltage_type;
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
 
 
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
1882,7 → 2645,7
bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
 
/* tell the bios not to handle mode switching */
bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
 
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
1933,10 → 2696,13
else
bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
 
if (lock)
if (lock) {
bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
else
bios_6_scratch &= ~ATOM_S6_ACC_MODE;
} else {
bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
bios_6_scratch |= ATOM_S6_ACC_MODE;
}
 
if (rdev->family >= CHIP_R600)
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
1970,11 → 2736,11
if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
if (connected) {
DRM_DEBUG("TV1 connected\n");
DRM_DEBUG_KMS("TV1 connected\n");
bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
} else {
DRM_DEBUG("TV1 disconnected\n");
DRM_DEBUG_KMS("TV1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_TV1_MASK;
bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
1983,11 → 2749,11
if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
if (connected) {
DRM_DEBUG("CV connected\n");
DRM_DEBUG_KMS("CV connected\n");
bios_3_scratch |= ATOM_S3_CV_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
} else {
DRM_DEBUG("CV disconnected\n");
DRM_DEBUG_KMS("CV disconnected\n");
bios_0_scratch &= ~ATOM_S0_CV_MASK;
bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
1996,12 → 2762,12
if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
if (connected) {
DRM_DEBUG("LCD1 connected\n");
DRM_DEBUG_KMS("LCD1 connected\n");
bios_0_scratch |= ATOM_S0_LCD1;
bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
} else {
DRM_DEBUG("LCD1 disconnected\n");
DRM_DEBUG_KMS("LCD1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_LCD1;
bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
2010,12 → 2776,12
if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
if (connected) {
DRM_DEBUG("CRT1 connected\n");
DRM_DEBUG_KMS("CRT1 connected\n");
bios_0_scratch |= ATOM_S0_CRT1_COLOR;
bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
} else {
DRM_DEBUG("CRT1 disconnected\n");
DRM_DEBUG_KMS("CRT1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
2024,12 → 2790,12
if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
if (connected) {
DRM_DEBUG("CRT2 connected\n");
DRM_DEBUG_KMS("CRT2 connected\n");
bios_0_scratch |= ATOM_S0_CRT2_COLOR;
bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
} else {
DRM_DEBUG("CRT2 disconnected\n");
DRM_DEBUG_KMS("CRT2 disconnected\n");
bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
2038,12 → 2804,12
if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
if (connected) {
DRM_DEBUG("DFP1 connected\n");
DRM_DEBUG_KMS("DFP1 connected\n");
bios_0_scratch |= ATOM_S0_DFP1;
bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
} else {
DRM_DEBUG("DFP1 disconnected\n");
DRM_DEBUG_KMS("DFP1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP1;
bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
2052,12 → 2818,12
if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
if (connected) {
DRM_DEBUG("DFP2 connected\n");
DRM_DEBUG_KMS("DFP2 connected\n");
bios_0_scratch |= ATOM_S0_DFP2;
bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
} else {
DRM_DEBUG("DFP2 disconnected\n");
DRM_DEBUG_KMS("DFP2 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP2;
bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
2066,12 → 2832,12
if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
if (connected) {
DRM_DEBUG("DFP3 connected\n");
DRM_DEBUG_KMS("DFP3 connected\n");
bios_0_scratch |= ATOM_S0_DFP3;
bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
} else {
DRM_DEBUG("DFP3 disconnected\n");
DRM_DEBUG_KMS("DFP3 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP3;
bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
2080,12 → 2846,12
if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
if (connected) {
DRM_DEBUG("DFP4 connected\n");
DRM_DEBUG_KMS("DFP4 connected\n");
bios_0_scratch |= ATOM_S0_DFP4;
bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
} else {
DRM_DEBUG("DFP4 disconnected\n");
DRM_DEBUG_KMS("DFP4 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP4;
bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
2094,12 → 2860,12
if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
if (connected) {
DRM_DEBUG("DFP5 connected\n");
DRM_DEBUG_KMS("DFP5 connected\n");
bios_0_scratch |= ATOM_S0_DFP5;
bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
} else {
DRM_DEBUG("DFP5 disconnected\n");
DRM_DEBUG_KMS("DFP5 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP5;
bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
/drivers/video/drm/radeon/radeon_bios.c
30,6 → 30,8
#include "radeon.h"
#include "atom.h"
 
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
/*
* BIOS.
*/
46,8 → 48,12
resource_size_t vram_base;
resource_size_t size = 256 * 1024; /* ??? */
 
if (!(rdev->flags & RADEON_IS_IGP))
if (!radeon_card_posted(rdev))
return false;
 
rdev->bios = NULL;
vram_base = drm_get_resource_start(rdev->ddev, 0);
vram_base = pci_resource_start(rdev->pdev, 0);
bios = ioremap(vram_base, size);
if (!bios) {
return false;
125,6 → 131,46
}
return true;
}
 
static bool ni_read_disabled_bios(struct radeon_device *rdev)
{
u32 bus_cntl;
u32 d1vga_control;
u32 d2vga_control;
u32 vga_render_control;
u32 rom_cntl;
bool r;
 
bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
rom_cntl = RREG32(R600_ROM_CNTL);
 
/* enable the rom */
WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
AVIVO_DVGA_CONTROL_TIMING_SELECT)));
WREG32(AVIVO_D2VGA_CONTROL,
(d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
AVIVO_DVGA_CONTROL_TIMING_SELECT)));
WREG32(AVIVO_VGA_RENDER_CONTROL,
(vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
 
r = radeon_read_bios(rdev);
 
/* restore regs */
WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
WREG32(R600_ROM_CNTL, rom_cntl);
return r;
}
 
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
138,7 → 184,7
bool r;
 
viph_control = RREG32(RADEON_VIPH_CONTROL);
bus_cntl = RREG32(RADEON_BUS_CNTL);
bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
147,7 → 193,7
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
186,7 → 232,7
cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
}
WREG32(RADEON_VIPH_CONTROL, viph_control);
WREG32(RADEON_BUS_CNTL, bus_cntl);
WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
211,7 → 257,7
bool r;
 
viph_control = RREG32(RADEON_VIPH_CONTROL);
bus_cntl = RREG32(RADEON_BUS_CNTL);
bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
226,7 → 272,7
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
257,7 → 303,7
 
/* restore regs */
WREG32(RADEON_VIPH_CONTROL, viph_control);
WREG32(RADEON_BUS_CNTL, bus_cntl);
WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
410,6 → 456,8
{
if (rdev->flags & RADEON_IS_IGP)
return igp_read_bios_from_vram(rdev);
else if (rdev->family >= CHIP_BARTS)
return ni_read_disabled_bios(rdev);
else if (rdev->family >= CHIP_RV770)
return r700_read_disabled_bios(rdev);
else if (rdev->family >= CHIP_R600)
/drivers/video/drm/radeon/radeon_clocks.c
91,6 → 91,87
return mclk;
}
 
#ifdef CONFIG_OF
/*
* Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
* tree. Hopefully, the ATI OF driver is kind enough to fill these in.
*/
static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct device_node *dp = rdev->pdev->dev.of_node;
const u32 *val;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
 
if (dp == NULL)
return false;
val = of_get_property(dp, "ATY,RefCLK", NULL);
if (!val || !*val) {
printk(KERN_WARNING "radeonfb: No ATY,RefCLK property!\n");
return false;
}
p1pll->reference_freq = p2pll->reference_freq = (*val) / 10;
p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
if (p1pll->reference_div < 2)
p1pll->reference_div = 12;
p2pll->reference_div = p1pll->reference_div;
 
/* These aren't in the device-tree */
if (rdev->family >= CHIP_R420) {
p1pll->pll_in_min = 100;
p1pll->pll_in_max = 1350;
p1pll->pll_out_min = 20000;
p1pll->pll_out_max = 50000;
p2pll->pll_in_min = 100;
p2pll->pll_in_max = 1350;
p2pll->pll_out_min = 20000;
p2pll->pll_out_max = 50000;
} else {
p1pll->pll_in_min = 40;
p1pll->pll_in_max = 500;
p1pll->pll_out_min = 12500;
p1pll->pll_out_max = 35000;
p2pll->pll_in_min = 40;
p2pll->pll_in_max = 500;
p2pll->pll_out_min = 12500;
p2pll->pll_out_max = 35000;
}
/* not sure what the max should be in all cases */
rdev->clock.max_pixel_clock = 35000;
 
spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
spll->reference_div = mpll->reference_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
RADEON_M_SPLL_REF_DIV_MASK;
 
val = of_get_property(dp, "ATY,SCLK", NULL);
if (val && *val)
rdev->clock.default_sclk = (*val) / 10;
else
rdev->clock.default_sclk =
radeon_legacy_get_engine_clock(rdev);
 
val = of_get_property(dp, "ATY,MCLK", NULL);
if (val && *val)
rdev->clock.default_mclk = (*val) / 10;
else
rdev->clock.default_mclk =
radeon_legacy_get_memory_clock(rdev);
 
DRM_INFO("Using device-tree clock info\n");
 
return true;
}
#else
static bool radeon_read_clocks_OF(struct drm_device *dev)
{
return false;
}
#endif /* CONFIG_OF */
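/* [editor's sketch -- not part of the diff] radeon_read_clocks_OF() above
 * trusts an OF property only when it is both present and non-zero, and
 * otherwise falls back to reading the live clock registers; the /10 appears
 * to convert the firmware's kHz values into the driver's 10 kHz units (unit
 * assumption inferred from that division). Standalone sketch of the
 * guarded-read-with-fallback idiom, using a toy property table in place of
 * of_get_property(): */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct prop { const char *name; uint32_t val; };
static const struct prop props[] = {
	{ "ATY,RefCLK", 27000 },          /* kHz; "ATY,SCLK" deliberately absent */
};

static const uint32_t *get_prop(const char *name)
{
	for (size_t i = 0; i < sizeof(props) / sizeof(props[0]); i++)
		if (!strcmp(props[i].name, name))
			return &props[i].val;
	return NULL;                      /* property missing */
}

static uint32_t hw_readback(void) { return 20000; } /* pretend register read */

int main(void)
{
	const uint32_t *val = get_prop("ATY,SCLK");
	uint32_t sclk = (val && *val) ? *val / 10 : hw_readback() / 10;

	printf("sclk = %u (x10 kHz)\n", sclk);    /* fallback path: 2000 */
	return 0;
}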
 
void radeon_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
105,6 → 186,8
ret = radeon_atom_get_clock_info(dev);
else
ret = radeon_combios_get_clock_info(dev);
if (!ret)
ret = radeon_read_clocks_OF(dev);
 
if (ret) {
if (p1pll->reference_div < 2) {
246,6 → 329,14
mpll->max_feedback_div = 0xff;
mpll->best_vco = 0;
 
if (!rdev->clock.default_sclk)
rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
 
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
 
}
 
/* 10 khz */
816,53 → 907,3
}
}
 
static void radeon_apply_clock_quirks(struct radeon_device *rdev)
{
uint32_t tmp;
 
/* XXX make sure engine is idle */
 
if (rdev->family < CHIP_RS600) {
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
if ((rdev->family == CHIP_RV250)
|| (rdev->family == CHIP_RV280))
tmp |=
RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
if ((rdev->family == CHIP_RV350)
|| (rdev->family == CHIP_RV380))
tmp |= R300_SCLK_FORCE_VAP;
if (rdev->family == CHIP_R420)
tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
} else if (rdev->family < CHIP_R600) {
tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
tmp |= AVIVO_CP_FORCEON;
WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
 
tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
tmp |= AVIVO_E2_FORCEON;
WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
 
tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
tmp |= AVIVO_IDCT_FORCEON;
WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
}
}
 
int radeon_static_clocks_init(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
 
/* XXX make sure engine is idle */
 
if (radeon_dynclks != -1) {
if (radeon_dynclks) {
if (rdev->asic->set_clock_gating)
radeon_set_clock_gating(rdev, 1);
}
}
radeon_apply_clock_quirks(rdev);
return 0;
}
/drivers/video/drm/radeon/radeon_combios.c
39,7 → 39,7
 
/* from radeon_encoder.c */
extern uint32_t
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
 
55,7 → 55,7
 
/* from radeon_legacy_encoder.c */
extern void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
 
/* old legacy ATI BIOS routines */
448,19 → 448,20
 
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
int edid_info;
int edid_info, size;
struct edid *edid;
unsigned char *raw;
edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
if (!edid_info)
return false;
 
edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
GFP_KERNEL);
raw = rdev->bios + edid_info;
size = EDID_LENGTH * (raw[0x7e] + 1);
edid = kmalloc(size, GFP_KERNEL);
if (edid == NULL)
return false;
 
memcpy((unsigned char *)edid,
(unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
memcpy((unsigned char *)edid, raw, size);
 
if (!drm_edid_is_valid(edid)) {
kfree(edid);
468,22 → 469,105
}
 
rdev->mode_info.bios_hardcoded_edid = edid;
rdev->mode_info.bios_hardcoded_edid_size = size;
return true;
}
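/* [editor's sketch -- not part of the diff] The key change above: instead of
 * allocating for a fixed maximum and copying a single 128-byte block, the new
 * code sizes the buffer from byte 0x7e of the base EDID block, which holds
 * the extension-block count, so EDIDs with CEA or other extensions are
 * copied whole. Standalone sketch of that size computation: */
#include <stdint.h>
#include <stdio.h>

#define EDID_LENGTH 128                   /* one EDID block */

static size_t edid_total_size(const uint8_t *raw)
{
	/* byte 0x7e = number of 128-byte extension blocks after the base */
	return (size_t)EDID_LENGTH * (raw[0x7e] + 1);
}

int main(void)
{
	uint8_t base_only[EDID_LENGTH] = {0};
	uint8_t with_cea[EDID_LENGTH]  = {0};

	with_cea[0x7e] = 1;               /* one extension block follows */
	printf("%zu %zu\n", edid_total_size(base_only),
	       edid_total_size(with_cea));    /* prints: 128 256 */
	return 0;
}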
 
/* this is used for atom LCDs as well */
struct edid *
radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
{
if (rdev->mode_info.bios_hardcoded_edid)
return rdev->mode_info.bios_hardcoded_edid;
struct edid *edid;
 
if (rdev->mode_info.bios_hardcoded_edid) {
edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
if (edid) {
memcpy((unsigned char *)edid,
(unsigned char *)rdev->mode_info.bios_hardcoded_edid,
rdev->mode_info.bios_hardcoded_edid_size);
return edid;
}
}
return NULL;
}
 
static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
int ddc_line)
enum radeon_combios_ddc ddc,
u32 clk_mask,
u32 data_mask)
{
struct radeon_i2c_bus_rec i2c;
int ddc_line = 0;
 
/* ddc id = mask reg
* DDC_NONE_DETECTED = none
* DDC_DVI = RADEON_GPIO_DVI_DDC
* DDC_VGA = RADEON_GPIO_VGA_DDC
* DDC_LCD = RADEON_GPIOPAD_MASK
* DDC_GPIO = RADEON_MDGPIO_MASK
* r1xx
* DDC_MONID = RADEON_GPIO_MONID
* DDC_CRT2 = RADEON_GPIO_CRT2_DDC
* r200
* DDC_MONID = RADEON_GPIO_MONID
* DDC_CRT2 = RADEON_GPIO_DVI_DDC
* r300/r350
* DDC_MONID = RADEON_GPIO_DVI_DDC
* DDC_CRT2 = RADEON_GPIO_DVI_DDC
* rv2xx/rv3xx
* DDC_MONID = RADEON_GPIO_MONID
* DDC_CRT2 = RADEON_GPIO_MONID
* rs3xx/rs4xx
* DDC_MONID = RADEON_GPIOPAD_MASK
* DDC_CRT2 = RADEON_GPIO_MONID
*/
switch (ddc) {
case DDC_NONE_DETECTED:
default:
ddc_line = 0;
break;
case DDC_DVI:
ddc_line = RADEON_GPIO_DVI_DDC;
break;
case DDC_VGA:
ddc_line = RADEON_GPIO_VGA_DDC;
break;
case DDC_LCD:
ddc_line = RADEON_GPIOPAD_MASK;
break;
case DDC_GPIO:
ddc_line = RADEON_MDGPIO_MASK;
break;
case DDC_MONID:
if (rdev->family == CHIP_RS300 ||
rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480)
ddc_line = RADEON_GPIOPAD_MASK;
else if (rdev->family == CHIP_R300 ||
rdev->family == CHIP_R350) {
ddc_line = RADEON_GPIO_DVI_DDC;
ddc = DDC_DVI;
} else
ddc_line = RADEON_GPIO_MONID;
break;
case DDC_CRT2:
if (rdev->family == CHIP_R200 ||
rdev->family == CHIP_R300 ||
rdev->family == CHIP_R350) {
ddc_line = RADEON_GPIO_DVI_DDC;
ddc = DDC_DVI;
} else if (rdev->family == CHIP_RS300 ||
rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480)
ddc_line = RADEON_GPIO_MONID;
else if (rdev->family >= CHIP_RV350) {
ddc_line = RADEON_GPIO_MONID;
ddc = DDC_MONID;
} else
ddc_line = RADEON_GPIO_CRT2_DDC;
break;
}
 
if (ddc_line == RADEON_GPIOPAD_MASK) {
i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
503,15 → 587,6
i2c.y_clk_reg = RADEON_MDGPIO_Y;
i2c.y_data_reg = RADEON_MDGPIO_Y;
} else {
i2c.mask_clk_mask = RADEON_GPIO_EN_1;
i2c.mask_data_mask = RADEON_GPIO_EN_0;
i2c.a_clk_mask = RADEON_GPIO_A_1;
i2c.a_data_mask = RADEON_GPIO_A_0;
i2c.en_clk_mask = RADEON_GPIO_EN_1;
i2c.en_data_mask = RADEON_GPIO_EN_0;
i2c.y_clk_mask = RADEON_GPIO_Y_1;
i2c.y_data_mask = RADEON_GPIO_Y_0;
 
i2c.mask_clk_reg = ddc_line;
i2c.mask_data_reg = ddc_line;
i2c.a_clk_reg = ddc_line;
522,6 → 597,39
i2c.y_data_reg = ddc_line;
}
 
if (clk_mask && data_mask) {
/* system specific masks */
i2c.mask_clk_mask = clk_mask;
i2c.mask_data_mask = data_mask;
i2c.a_clk_mask = clk_mask;
i2c.a_data_mask = data_mask;
i2c.en_clk_mask = clk_mask;
i2c.en_data_mask = data_mask;
i2c.y_clk_mask = clk_mask;
i2c.y_data_mask = data_mask;
} else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
(ddc_line == RADEON_MDGPIO_MASK)) {
/* default gpiopad masks */
i2c.mask_clk_mask = (0x20 << 8);
i2c.mask_data_mask = 0x80;
i2c.a_clk_mask = (0x20 << 8);
i2c.a_data_mask = 0x80;
i2c.en_clk_mask = (0x20 << 8);
i2c.en_data_mask = 0x80;
i2c.y_clk_mask = (0x20 << 8);
i2c.y_data_mask = 0x80;
} else {
/* default masks for ddc pads */
i2c.mask_clk_mask = RADEON_GPIO_EN_1;
i2c.mask_data_mask = RADEON_GPIO_EN_0;
i2c.a_clk_mask = RADEON_GPIO_A_1;
i2c.a_data_mask = RADEON_GPIO_A_0;
i2c.en_clk_mask = RADEON_GPIO_EN_1;
i2c.en_data_mask = RADEON_GPIO_EN_0;
i2c.y_clk_mask = RADEON_GPIO_Y_1;
i2c.y_data_mask = RADEON_GPIO_Y_0;
}
 
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
531,10 → 639,7
case CHIP_RS300:
switch (ddc_line) {
case RADEON_GPIO_DVI_DDC:
/* in theory this should be hw capable,
* but it doesn't seem to work
*/
i2c.hw_capable = false;
i2c.hw_capable = true;
break;
default:
i2c.hw_capable = false;
602,9 → 707,10
break;
}
i2c.mm_i2c = false;
i2c.i2c_id = 0;
i2c.hpd_id = 0;
 
i2c.i2c_id = ddc;
i2c.hpd = RADEON_HPD_NONE;
 
if (ddc_line)
i2c.valid = true;
else
613,6 → 719,80
return i2c;
}
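/* [editor's sketch -- not part of the diff] The mask selection added above
 * has a clear precedence: system-specific masks passed in by the caller win,
 * then the gpiopad/mdgpio defaults, then the fixed GPIO_EN/A/Y pad masks.
 * A compact standalone restatement (the ddc-pad mask values here are
 * illustrative stand-ins, not the real register bits): */
#include <stdint.h>
#include <stdio.h>

struct masks { uint32_t clk, data; };

#define PAD_DEFAULT_CLK  (0x20u << 8)     /* matches the gpiopad default above */
#define PAD_DEFAULT_DATA 0x80u
#define DDC_EN_CLK       0x1000u          /* illustrative only */
#define DDC_EN_DATA      0x2000u          /* illustrative only */

static struct masks pick_masks(uint32_t clk_mask, uint32_t data_mask,
			       int is_gpiopad)
{
	if (clk_mask && data_mask)                            /* 1. system specific */
		return (struct masks){ clk_mask, data_mask };
	if (is_gpiopad)                                       /* 2. gpiopad default */
		return (struct masks){ PAD_DEFAULT_CLK, PAD_DEFAULT_DATA };
	return (struct masks){ DDC_EN_CLK, DDC_EN_DATA };     /* 3. ddc pads */
}

int main(void)
{
	struct masks m = pick_masks(0, 0, 1);
	printf("0x%x 0x%x\n", m.clk, m.data);    /* prints: 0x2000 0x80 */
	return 0;
}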
 
void radeon_combios_i2c_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct radeon_i2c_bus_rec i2c;
 
/* actual hw pads
* r1xx/rs2xx/rs3xx
* 0x60, 0x64, 0x68, 0x6c, gpiopads, mm
* r200
* 0x60, 0x64, 0x68, mm
* r300/r350
* 0x60, 0x64, mm
* rv2xx/rv3xx/rs4xx
* 0x60, 0x64, 0x68, gpiopads, mm
*/
 
/* 0x60 */
i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC");
/* 0x64 */
i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC");
 
/* mm i2c */
i2c.valid = true;
i2c.hw_capable = true;
i2c.mm_i2c = true;
i2c.i2c_id = 0xa0;
rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C");
 
if (rdev->family == CHIP_R300 ||
rdev->family == CHIP_R350) {
/* only 2 sw i2c pads */
} else if (rdev->family == CHIP_RS300 ||
rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) {
u16 offset;
u8 id, blocks, clk, data;
int i;
 
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
 
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
if (offset) {
blocks = RBIOS8(offset + 2);
for (i = 0; i < blocks; i++) {
id = RBIOS8(offset + 3 + (i * 5) + 0);
if (id == 136) {
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
/* gpiopad */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
(1 << clk), (1 << data));
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
break;
}
}
}
} else if (rdev->family >= CHIP_R200) {
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
} else {
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
/* 0x6c */
i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC");
}
}
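/* [editor's sketch -- not part of the diff] The rs3xx/rs4xx branch above
 * walks the COMBIOS i2c-info table looking for record id 136, whose clk/data
 * bytes are GPIO bit numbers that become single-bit masks. Standalone sketch
 * of that record walk (layout per the parser: byte 2 = block count, 5-byte
 * records from offset 3, record bytes 3/4 = clk/data bit numbers): */
#include <stdint.h>
#include <stdio.h>

struct gpiopad { uint32_t clk_mask, data_mask; };

static int find_gpiopad(const uint8_t *tbl, struct gpiopad *out)
{
	uint8_t blocks = tbl[2];

	for (int i = 0; i < blocks; i++) {
		const uint8_t *rec = tbl + 3 + i * 5;

		if (rec[0] == 136) {              /* gpiopad record id */
			out->clk_mask  = 1u << rec[3];
			out->data_mask = 1u << rec[4];
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	uint8_t tbl[8] = { 0, 0, 1, 136, 0, 0, 13, 7 };   /* toy table */
	struct gpiopad p;

	if (find_gpiopad(tbl, &p))
		printf("clk 0x%x data 0x%x\n", p.clk_mask, p.data_mask);
	/* prints clk 0x2000 data 0x80 -- the same values as the gpiopad
	 * defaults in combios_setup_i2c_bus() above */
	return 0;
}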
 
bool radeon_combios_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
633,6 → 813,8
p1pll->reference_div = RBIOS16(pll_info + 0x10);
p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
p1pll->lcd_pll_out_min = p1pll->pll_out_min;
p1pll->lcd_pll_out_max = p1pll->pll_out_max;
 
if (rev > 9) {
p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
684,6 → 866,11
rdev->clock.default_sclk = sclk;
rdev->clock.default_mclk = mclk;
 
if (RBIOS32(pll_info + 0x16))
rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
else
rdev->clock.max_pixel_clock = 35000; /* might need something ASIC-specific */
 
return true;
}
return false;
694,6 → 881,10
struct drm_device *dev = rdev->ddev;
u16 igp_info;
 
/* sideport is AMD only */
if (rdev->family == CHIP_RS400)
return false;
 
igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
 
if (igp_info) {
761,6 → 952,8
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
/* if the values are all zeros, use the table */
if (p_dac->ps2_pdac_adj)
found = 1;
}
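/* [editor's sketch -- not part of the diff] The added "if the values are all
 * zeros" guards (here and in the TV-DAC parsing further down) all implement
 * the same rule: a zero bg/dac adjustment means the BIOS entry is unset, so
 * found stays 0 and the driver falls through to the per-family default
 * tables. Standalone restatement of that guard: */
#include <stdint.h>
#include <stdio.h>

static int pack_pdac_adj(uint8_t bg, uint8_t dac, uint32_t *out)
{
	uint32_t v = ((uint32_t)(bg & 0xf) << 8) | (dac & 0xf);

	if (!v)
		return 0;             /* unset entry: caller uses the default table */
	*out = v;
	return 1;                     /* found */
}

int main(void)
{
	uint32_t adj = 0;

	printf("%d %d\n", pack_pdac_adj(0, 0, &adj),
	       pack_pdac_adj(3, 8, &adj));        /* prints: 0 1 */
	return 0;
}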
 
783,31 → 976,31
switch (RBIOS8(tv_info + 7) & 0xf) {
case 1:
tv_std = TV_STD_NTSC;
DRM_INFO("Default TV standard: NTSC\n");
DRM_DEBUG_KMS("Default TV standard: NTSC\n");
break;
case 2:
tv_std = TV_STD_PAL;
DRM_INFO("Default TV standard: PAL\n");
DRM_DEBUG_KMS("Default TV standard: PAL\n");
break;
case 3:
tv_std = TV_STD_PAL_M;
DRM_INFO("Default TV standard: PAL-M\n");
DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
break;
case 4:
tv_std = TV_STD_PAL_60;
DRM_INFO("Default TV standard: PAL-60\n");
DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
break;
case 5:
tv_std = TV_STD_NTSC_J;
DRM_INFO("Default TV standard: NTSC-J\n");
DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
break;
case 6:
tv_std = TV_STD_SCART_PAL;
DRM_INFO("Default TV standard: SCART-PAL\n");
DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
break;
default:
tv_std = TV_STD_NTSC;
DRM_INFO
DRM_DEBUG_KMS
("Unknown TV standard; defaulting to NTSC\n");
break;
}
814,16 → 1007,16
 
switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
case 0:
DRM_INFO("29.498928713 MHz TV ref clk\n");
DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
break;
case 1:
DRM_INFO("28.636360000 MHz TV ref clk\n");
DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
break;
case 2:
DRM_INFO("14.318180000 MHz TV ref clk\n");
DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
break;
case 3:
DRM_INFO("27.000000000 MHz TV ref clk\n");
DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
break;
default:
break;
896,6 → 1089,8
bg = RBIOS8(dac_info + 0x10) & 0xf;
dac = RBIOS8(dac_info + 0x11) & 0xf;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
/* if the values are all zeros, use the table */
if (tv_dac->ps2_tvdac_adj)
found = 1;
} else if (rev > 1) {
bg = RBIOS8(dac_info + 0xc) & 0xf;
909,6 → 1104,8
bg = RBIOS8(dac_info + 0xe) & 0xf;
dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
/* if the values are all zeros, use the table */
if (tv_dac->ps2_tvdac_adj)
found = 1;
}
tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
926,6 → 1123,8
(bg << 16) | (dac << 20);
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
/* if the values are all zeros, use the table */
if (tv_dac->ps2_tvdac_adj)
found = 1;
} else {
bg = RBIOS8(dac_info + 0x4) & 0xf;
934,6 → 1133,8
(bg << 16) | (dac << 20);
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
/* if the values are all zeros, use the table */
if (tv_dac->ps2_tvdac_adj)
found = 1;
}
} else {
1104,18 → 1305,20
break;
 
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) ==
lvds->native_mode.vdisplay)) {
lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
RBIOS16(tmp + 21)) * 8;
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
lvds->native_mode.htotal = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
(RBIOS8(tmp + 23) * 8);
 
lvds->native_mode.vtotal = RBIOS16(tmp + 24);
lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
lvds->native_mode.vsync_end =
((RBIOS16(tmp + 28) & 0xf800) >> 11) +
(RBIOS16(tmp + 28) & 0x7ff);
lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
(RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
((RBIOS16(tmp + 28) & 0xf800) >> 11);
 
lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
lvds->native_mode.flags = 0;
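/* [editor's sketch -- not part of the diff] The reworked LVDS math above
 * derives htotal and the sync window from the panel's blank/overplus fields
 * relative to hdisplay (horizontal values are in 8-pixel character cells)
 * instead of trusting raw BIOS totals. Standalone sketch of the horizontal
 * side; the field names are my own labels for the raw offsets (+17 blank,
 * +19 overplus, +21 sync start, +23 sync width): */
#include <stdio.h>

struct lvds_raw { int hblank, h_over, hsync_field, hsync_width; };

static void h_timings(int hdisplay, struct lvds_raw r)
{
	int htotal      = hdisplay + (r.hblank - r.h_over) * 8;
	int hsync_start = hdisplay + (r.hsync_field - r.h_over - 1) * 8;
	int hsync_end   = hsync_start + r.hsync_width * 8;

	printf("%d -> total %d, sync %d..%d\n",
	       hdisplay, htotal, hsync_start, hsync_end);
}

int main(void)
{
	/* plausible values for a 1024-wide panel, purely illustrative */
	h_timings(1024, (struct lvds_raw){ 40, 2, 8, 14 });
	/* prints: 1024 -> total 1328, sync 1064..1176 */
	return 0;
}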
1184,7 → 1387,7
 
if (tmds_info) {
ver = RBIOS8(tmds_info);
DRM_INFO("DFP table revision: %d\n", ver);
DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
if (ver == 3) {
n = RBIOS8(tmds_info + 5) + 1;
if (n > 4)
1194,7 → 1397,7
RBIOS32(tmds_info + i * 10 + 0x08);
tmds->tmds_pll[i].freq =
RBIOS16(tmds_info + i * 10 + 0x10);
DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
}
1212,7 → 1415,7
stride += 10;
else
stride += 6;
DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
}
1232,8 → 1435,8
struct radeon_i2c_bus_rec i2c_bus;
 
/* default for macs */
i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
 
/* XXX some macs have duallink chips */
switch (rdev->mode_info.connector_table) {
1254,88 → 1457,35
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
uint16_t offset;
uint8_t ver, id, blocks, clk, data;
int i;
uint8_t ver;
enum radeon_combios_ddc gpio;
struct radeon_i2c_bus_rec i2c_bus;
 
tmds->i2c_bus = NULL;
if (rdev->flags & RADEON_IS_IGP) {
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
if (offset) {
ver = RBIOS8(offset);
DRM_INFO("GPIO Table revision: %d\n", ver);
blocks = RBIOS8(offset + 2);
for (i = 0; i < blocks; i++) {
id = RBIOS8(offset + 3 + (i * 5) + 0);
if (id == 136) {
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
i2c_bus.valid = true;
i2c_bus.mask_clk_mask = (1 << clk);
i2c_bus.mask_data_mask = (1 << data);
i2c_bus.a_clk_mask = (1 << clk);
i2c_bus.a_data_mask = (1 << data);
i2c_bus.en_clk_mask = (1 << clk);
i2c_bus.en_data_mask = (1 << data);
i2c_bus.y_clk_mask = (1 << clk);
i2c_bus.y_data_mask = (1 << data);
i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
tmds->dvo_chip = DVO_SIL164;
tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
break;
}
}
}
} else {
offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
if (offset) {
ver = RBIOS8(offset);
DRM_INFO("External TMDS Table revision: %d\n", ver);
DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
tmds->slave_addr = RBIOS8(offset + 4 + 2);
tmds->slave_addr >>= 1; /* 7 bit addressing */
gpio = RBIOS8(offset + 4 + 3);
switch (gpio) {
case DDC_MONID:
i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
case DDC_DVI:
i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
case DDC_VGA:
i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
case DDC_CRT2:
/* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
if (rdev->family >= CHIP_R300)
i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
else
i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
case DDC_LCD: /* MM i2c */
if (gpio == DDC_LCD) {
/* MM i2c */
i2c_bus.valid = true;
i2c_bus.hw_capable = true;
i2c_bus.mm_i2c = true;
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
default:
DRM_ERROR("Unsupported gpio %d\n", gpio);
break;
i2c_bus.i2c_id = 0xa0;
} else
i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
}
}
}
 
if (!tmds->i2c_bus) {
DRM_INFO("No valid Ext TMDS info found in BIOS\n");
1398,8 → 1548,22
/* PowerMac8,1 ? */
/* imac g5 isight */
rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
} else if ((rdev->pdev->device == 0x4a48) &&
(rdev->pdev->subsystem_vendor == 0x1002) &&
(rdev->pdev->subsystem_device == 0x4a48)) {
/* Mac X800 */
rdev->mode_info.connector_table = CT_MAC_X800;
} else if (of_machine_is_compatible("PowerMac7,2") ||
of_machine_is_compatible("PowerMac7,3")) {
/* Mac G5 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
if (ASIC_IS_RN50(rdev))
rdev->mode_info.connector_table = CT_RN50_POWER;
else
#endif
rdev->mode_info.connector_table = CT_GENERIC;
}
 
1410,10 → 1574,10
/* these are the most common settings */
if (rdev->flags & RADEON_SINGLE_CRTC) {
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
1425,10 → 1589,10
&hpd);
} else if (rdev->flags & RADEON_IS_MOBILITY) {
/* LVDS */
ddc_i2c = combios_setup_i2c_bus(rdev, 0);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
1440,10 → 1604,10
&hpd);
 
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
1455,15 → 1619,15
&hpd);
} else {
/* DVI-I - tv dac, int tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
1476,10 → 1640,10
&hpd);
 
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
1496,7 → 1660,7
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
1512,10 → 1676,10
DRM_INFO("Connector Table: %d (ibook)\n",
rdev->mode_info.connector_table);
/* LVDS */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
1524,10 → 1688,10
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* VGA - TV DAC */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
1539,7 → 1703,7
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
1553,10 → 1717,10
DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
1565,15 → 1729,15
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* DVI-I - primary dac, ext tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
1588,7 → 1752,7
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
1602,10 → 1766,10
DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
1614,15 → 1778,15
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* DVI-I - primary dac, int tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
1636,7 → 1800,7
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
1650,10 → 1814,10
DRM_INFO("Connector Table: %d (powerbook vga)\n",
rdev->mode_info.connector_table);
/* LVDS */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
1662,10 → 1826,10
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
1677,7 → 1841,7
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
1691,15 → 1855,15
DRM_INFO("Connector Table: %d (mini external tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, ext tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
1714,7 → 1878,7
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
1728,15 → 1892,15
DRM_INFO("Connector Table: %d (mini internal tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, int tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
1750,7 → 1914,7
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
1764,10 → 1928,10
DRM_INFO("Connector Table: %d (imac g5 isight)\n",
rdev->mode_info.connector_table);
/* DVI-D - int tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
1776,10 → 1940,10
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
&hpd);
/* VGA - tv dac */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
1791,7 → 1955,7
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
1805,10 → 1969,10
DRM_INFO("Connector Table: %d (emac)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
1817,10 → 1981,10
CONNECTOR_OBJECT_ID_VGA,
&hpd);
/* VGA - tv dac */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
1832,7 → 1996,7
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
1842,6 → 2006,130
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
case CT_RN50_POWER:
DRM_INFO("Connector Table: %d (rn50-power)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
CONNECTOR_OBJECT_ID_VGA,
&hpd);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
CONNECTOR_OBJECT_ID_VGA,
&hpd);
break;
case CT_MAC_X800:
DRM_INFO("Connector Table: %d (mac x800)\n",
rdev->mode_info.connector_table);
/* DVI - primary dac, internal tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
&hpd);
/* DVI - tv dac, dvo */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
&hpd);
break;
case CT_MAC_G5_9600:
DRM_INFO("Connector Table: %d (mac g5 9600)\n",
rdev->mode_info.connector_table);
/* DVI - tv dac, dvo */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
&hpd);
/* ADC - primary dac, internal tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
&hpd);
/* TV - TV DAC */
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
1860,32 → 2148,7
struct radeon_i2c_bus_rec *ddc_i2c,
struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
 
/* XPRESS DDC quirks */
if ((rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
*ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
else if ((rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
*ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
ddc_i2c->mask_clk_mask = (0x20 << 8);
ddc_i2c->mask_data_mask = 0x80;
ddc_i2c->a_clk_mask = (0x20 << 8);
ddc_i2c->a_data_mask = 0x80;
ddc_i2c->en_clk_mask = (0x20 << 8);
ddc_i2c->en_data_mask = 0x80;
ddc_i2c->y_clk_mask = (0x20 << 8);
ddc_i2c->y_data_mask = 0x80;
}
 
/* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
if ((rdev->family >= CHIP_R300) &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
*ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
 
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
if (dev->pdev->device == 0x515e &&
1895,15 → 2158,6
return false;
}
 
/* Some RV100 cards with 2 VGA ports show up with DVI+VGA */
if (dev->pdev->device == 0x5159 &&
dev->pdev->subsystem_vendor == 0x1002 &&
dev->pdev->subsystem_device == 0x013a) {
if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
*legacy_connector = CONNECTOR_CRT_LEGACY;
 
}
 
/* X300 card with extra non-existent DVI port */
if (dev->pdev->device == 0x5B60 &&
dev->pdev->subsystem_vendor == 0x17af &&
1997,26 → 2251,7
connector = (tmp >> 12) & 0xf;
 
ddc_type = (tmp >> 8) & 0xf;
switch (ddc_type) {
case DDC_MONID:
ddc_i2c =
combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
break;
case DDC_DVI:
ddc_i2c =
combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
break;
case DDC_VGA:
ddc_i2c =
combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
break;
case DDC_CRT2:
ddc_i2c =
combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
break;
default:
break;
}
ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
 
switch (connector) {
case CONNECTOR_PROPRIETARY_LEGACY:
2043,7 → 2278,7
else
devices = ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
2057,7 → 2292,7
if (tmp & 0x1) {
devices = ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
2065,7 → 2300,7
} else {
devices = ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
2085,7 → 2320,7
if (tmp & 0x1) {
devices |= ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
2093,7 → 2328,7
} else {
devices |= ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
2102,7 → 2337,7
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
2111,7 → 2346,7
} else {
devices |= ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
2136,7 → 2371,7
connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
}
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
2149,7 → 2384,7
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
2173,21 → 2408,21
uint16_t tmds_info =
combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
DRM_DEBUG("Found DFP table, assuming DVI connector\n");
DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
 
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
 
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
hpd.hpd = RADEON_HPD_NONE;
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT |
2199,14 → 2434,14
} else {
uint16_t crt_info =
combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
DRM_DEBUG("Found CRT table, assuming VGA connector\n");
DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
if (crt_info) {
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
0,
2216,7 → 2451,7
CONNECTOR_OBJECT_ID_VGA,
&hpd);
} else {
DRM_DEBUG("No connector info found\n");
DRM_DEBUG_KMS("No connector info found\n");
return false;
}
}
2231,7 → 2466,7
COMBIOS_LCD_DDC_INFO_TABLE);
 
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
2239,73 → 2474,28
if (lcd_ddc_info) {
ddc_type = RBIOS8(lcd_ddc_info + 2);
switch (ddc_type) {
case DDC_MONID:
ddc_i2c =
combios_setup_i2c_bus
(rdev, RADEON_GPIO_MONID);
break;
case DDC_DVI:
ddc_i2c =
combios_setup_i2c_bus
(rdev, RADEON_GPIO_DVI_DDC);
break;
case DDC_VGA:
ddc_i2c =
combios_setup_i2c_bus
(rdev, RADEON_GPIO_VGA_DDC);
break;
case DDC_CRT2:
ddc_i2c =
combios_setup_i2c_bus
(rdev, RADEON_GPIO_CRT2_DDC);
break;
case DDC_LCD:
ddc_i2c =
combios_setup_i2c_bus
(rdev, RADEON_GPIOPAD_MASK);
ddc_i2c.mask_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.mask_data_mask =
RBIOS32(lcd_ddc_info + 7);
ddc_i2c.a_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
combios_setup_i2c_bus(rdev,
DDC_LCD,
RBIOS32(lcd_ddc_info + 3),
RBIOS32(lcd_ddc_info + 7));
radeon_i2c_add(rdev, &ddc_i2c, "LCD");
break;
case DDC_GPIO:
ddc_i2c =
combios_setup_i2c_bus
(rdev, RADEON_MDGPIO_MASK);
ddc_i2c.mask_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.mask_data_mask =
RBIOS32(lcd_ddc_info + 7);
ddc_i2c.a_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
combios_setup_i2c_bus(rdev,
DDC_GPIO,
RBIOS32(lcd_ddc_info + 3),
RBIOS32(lcd_ddc_info + 7));
radeon_i2c_add(rdev, &ddc_i2c, "LCD");
break;
default:
ddc_i2c.valid = false;
ddc_i2c =
combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
break;
}
DRM_DEBUG("LCD DDC Info Table found!\n");
DRM_DEBUG_KMS("LCD DDC Info Table found!\n");
} else
ddc_i2c.valid = false;
 
2328,8 → 2518,9
if (RBIOS8(tv_info + 6) == 'T') {
if (radeon_apply_legacy_tv_quirks(dev)) {
hpd.hpd = RADEON_HPD_NONE;
ddc_i2c.valid = false;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
radeon_get_encoder_enum
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
2350,6 → 2541,12
return true;
}
 
static const char *thermal_controller_names[] = {
"NONE",
"lm63",
"adm1032",
};
 
void radeon_combios_get_power_modes(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
2357,8 → 2554,67
u8 rev, blocks, tmp;
int state_index = 0;
 
rdev->pm.default_power_state = NULL;
rdev->pm.default_power_state_index = -1;
 
/* allocate 2 power states */
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
if (!rdev->pm.power_state) {
rdev->pm.default_power_state_index = state_index;
rdev->pm.num_power_states = 0;
 
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
return;
}
 
/* check for a thermal chip */
offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
if (offset) {
u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
struct radeon_i2c_bus_rec i2c_bus;
 
rev = RBIOS8(offset);
 
if (rev == 0) {
thermal_controller = RBIOS8(offset + 3);
gpio = RBIOS8(offset + 4) & 0x3f;
i2c_addr = RBIOS8(offset + 5);
} else if (rev == 1) {
thermal_controller = RBIOS8(offset + 4);
gpio = RBIOS8(offset + 5) & 0x3f;
i2c_addr = RBIOS8(offset + 6);
} else if (rev == 2) {
thermal_controller = RBIOS8(offset + 4);
gpio = RBIOS8(offset + 5) & 0x3f;
i2c_addr = RBIOS8(offset + 6);
clk_bit = RBIOS8(offset + 0xa);
data_bit = RBIOS8(offset + 0xb);
}
if ((thermal_controller > 0) && (thermal_controller < 3)) {
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
thermal_controller_names[thermal_controller],
i2c_addr >> 1);
if (gpio == DDC_LCD) {
/* MM i2c */
i2c_bus.valid = true;
i2c_bus.hw_capable = true;
i2c_bus.mm_i2c = true;
i2c_bus.i2c_id = 0xa0;
} else if (gpio == DDC_GPIO)
i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit);
else
i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = thermal_controller_names[thermal_controller];
info.addr = i2c_addr >> 1;
strlcpy(info.type, name, sizeof(info.type));
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
}
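/* [editor's sketch -- not part of the diff] The new thermal-chip probe above
 * range-checks the controller index against the name table and halves the
 * address, because the BIOS stores 8-bit bus addresses while Linux i2c uses
 * 7-bit ones. Standalone restatement of those two guards: */
#include <stdio.h>

static const char *names[] = { "NONE", "lm63", "adm1032" };
#define N_NAMES (sizeof(names) / sizeof(names[0]))

int main(void)
{
	unsigned ctrl = 2;            /* index parsed from the overdrive table */
	unsigned i2c_addr = 0x98;     /* 8-bit address as stored by the BIOS */

	/* index 0 means "none"; anything past the table is unknown */
	if (ctrl > 0 && ctrl < N_NAMES)
		printf("Possible %s thermal controller at 0x%02x\n",
		       names[ctrl], i2c_addr >> 1);
	/* prints: Possible adm1032 thermal controller at 0x4c (7-bit form) */
	return 0;
}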
 
if (rdev->flags & RADEON_IS_MOBILITY) {
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
if (offset) {
2371,17 → 2627,13
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
goto default_mode;
/* skip overclock modes for now */
if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk >
rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
goto default_mode;
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
misc = RBIOS16(offset + 0x5 + 0x0);
if (rev > 4)
misc2 = RBIOS16(offset + 0x5 + 0xe);
rdev->pm.power_state[state_index].misc = misc;
rdev->pm.power_state[state_index].misc2 = misc2;
if (misc & 0x4) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
if (misc & 0x8)
2428,8 → 2680,9
} else
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
if (rev > 6)
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
rdev->pm.power_state[state_index].pcie_lanes =
RBIOS8(offset + 0x5 + 0x10);
rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
state_index++;
} else {
/* XXX figure out some good default low power mode for mobility cards w/out power tables */
2446,17 → 2699,19
rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
if ((state_index > 0) &&
(rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
rdev->pm.power_state[state_index].clock_info[0].voltage =
rdev->pm.power_state[0].clock_info[0].voltage;
else
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
if (rdev->asic->get_pcie_lanes)
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
else
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
rdev->pm.power_state[state_index].pcie_lanes = 16;
rdev->pm.power_state[state_index].flags = 0;
rdev->pm.default_power_state_index = state_index;
rdev->pm.num_power_states = state_index + 1;
 
rdev->pm.current_power_state = rdev->pm.default_power_state;
rdev->pm.current_clock_mode =
rdev->pm.default_power_state->default_clock_mode;
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
}
 
void radeon_external_tmds_setup(struct drm_encoder *encoder)
2906,9 → 3161,8
if (rev < 3) {
mem_cntl = RBIOS32(offset + 1);
mem_size = RBIOS16(offset + 5);
if (((rdev->flags & RADEON_FAMILY_MASK) < CHIP_R200) &&
((dev->pdev->device != 0x515e)
&& (dev->pdev->device != 0x5969)))
if ((rdev->family < CHIP_R200) &&
!ASIC_IS_RN50(rdev))
WREG32(RADEON_MEM_CNTL, mem_cntl);
}
}
2919,10 → 3173,8
if (offset) {
rev = RBIOS8(offset - 1);
if (rev < 1) {
if (((rdev->flags & RADEON_FAMILY_MASK) <
CHIP_R200)
&& ((dev->pdev->device != 0x515e)
&& (dev->pdev->device != 0x5969))) {
if ((rdev->family < CHIP_R200)
&& !ASIC_IS_RN50(rdev)) {
int ram = 0;
int mem_addr_mapping = 0;
 
3007,6 → 3259,22
combios_write_ram_size(dev);
}
 
/* quirk for rs4xx HP nx6125 laptop to make it resume
* - it hangs on resume inside the dynclk 1 table.
*/
if (rdev->family == CHIP_RS480 &&
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x308b)
return;
 
/* quirk for rs4xx HP dv5000 laptop to make it resume
* - it hangs on resume inside the dynclk 1 table.
*/
if (rdev->family == CHIP_RS480 &&
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x30a4)
return;
 
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
3070,7 → 3338,7
if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
if (connected) {
DRM_DEBUG("TV1 connected\n");
DRM_DEBUG_KMS("TV1 connected\n");
/* fix me */
bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
/*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
3077,7 → 3345,7
bios_5_scratch |= RADEON_TV1_ON;
bios_5_scratch |= RADEON_ACC_REQ_TV1;
} else {
DRM_DEBUG("TV1 disconnected\n");
DRM_DEBUG_KMS("TV1 disconnected\n");
bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
bios_5_scratch &= ~RADEON_TV1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
3086,12 → 3354,12
if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
if (connected) {
DRM_DEBUG("LCD1 connected\n");
DRM_DEBUG_KMS("LCD1 connected\n");
bios_4_scratch |= RADEON_LCD1_ATTACHED;
bios_5_scratch |= RADEON_LCD1_ON;
bios_5_scratch |= RADEON_ACC_REQ_LCD1;
} else {
DRM_DEBUG("LCD1 disconnected\n");
DRM_DEBUG_KMS("LCD1 disconnected\n");
bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
bios_5_scratch &= ~RADEON_LCD1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
3100,12 → 3368,12
if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
if (connected) {
DRM_DEBUG("CRT1 connected\n");
DRM_DEBUG_KMS("CRT1 connected\n");
bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
bios_5_scratch |= RADEON_CRT1_ON;
bios_5_scratch |= RADEON_ACC_REQ_CRT1;
} else {
DRM_DEBUG("CRT1 disconnected\n");
DRM_DEBUG_KMS("CRT1 disconnected\n");
bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
bios_5_scratch &= ~RADEON_CRT1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
3114,12 → 3382,12
if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
if (connected) {
DRM_DEBUG("CRT2 connected\n");
DRM_DEBUG_KMS("CRT2 connected\n");
bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
bios_5_scratch |= RADEON_CRT2_ON;
bios_5_scratch |= RADEON_ACC_REQ_CRT2;
} else {
DRM_DEBUG("CRT2 disconnected\n");
DRM_DEBUG_KMS("CRT2 disconnected\n");
bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
bios_5_scratch &= ~RADEON_CRT2_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
3128,12 → 3396,12
if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
if (connected) {
DRM_DEBUG("DFP1 connected\n");
DRM_DEBUG_KMS("DFP1 connected\n");
bios_4_scratch |= RADEON_DFP1_ATTACHED;
bios_5_scratch |= RADEON_DFP1_ON;
bios_5_scratch |= RADEON_ACC_REQ_DFP1;
} else {
DRM_DEBUG("DFP1 disconnected\n");
DRM_DEBUG_KMS("DFP1 disconnected\n");
bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
bios_5_scratch &= ~RADEON_DFP1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
3142,12 → 3410,12
if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
if (connected) {
DRM_DEBUG("DFP2 connected\n");
DRM_DEBUG_KMS("DFP2 connected\n");
bios_4_scratch |= RADEON_DFP2_ATTACHED;
bios_5_scratch |= RADEON_DFP2_ON;
bios_5_scratch |= RADEON_ACC_REQ_DFP2;
} else {
DRM_DEBUG("DFP2 disconnected\n");
DRM_DEBUG_KMS("DFP2 disconnected\n");
bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
bios_5_scratch &= ~RADEON_DFP2_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
/drivers/video/drm/radeon/radeon_connectors.c
40,6 → 40,10
struct drm_encoder *encoder,
bool connected);
 
extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
 
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
46,22 → 50,23
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_needs_link_train(radeon_connector)) {
if (connector->encoder)
dp_link_train(connector->encoder, connector);
/* powering up/down the eDP panel generates hpd events which
* can interfere with modesetting.
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
return;
 
/* pre-r600 did not always have the hpd pins mapped accurately to connectors */
if (rdev->family >= CHIP_R600) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
}
}
 
}
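/* [editor's sketch -- not part of the diff] The rewritten hotplug handler
 * above encodes two policies: eDP events are ignored (panel power sequencing
 * raises spurious HPD), and only >= R600 parts, whose HPD pins map reliably
 * to connectors, force a DPMS change from the sensed state. Standalone
 * restatement of that decision table: */
#include <stdbool.h>
#include <stdio.h>

enum dpms_action { KEEP, TURN_ON, TURN_OFF };

static enum dpms_action hotplug_action(bool is_edp, bool ge_r600, bool sensed)
{
	if (is_edp)
		return KEEP;          /* spurious HPD from panel power cycling */
	if (!ge_r600)
		return KEEP;          /* pre-r600: hpd mapping unreliable */
	return sensed ? TURN_ON : TURN_OFF;
}

int main(void)
{
	printf("%d %d %d\n",
	       hotplug_action(true,  true,  true),    /* 0: keep */
	       hotplug_action(false, true,  false),   /* 2: off  */
	       hotplug_action(false, false, true));   /* 0: keep */
	return 0;
}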
 
static void radeon_property_change_mode(struct drm_encoder *encoder)
{
struct drm_crtc *crtc = encoder->crtc;
162,6 → 167,7
{
struct drm_device *dev = connector->dev;
struct drm_connector *conflict;
struct radeon_connector *radeon_conflict;
int i;
 
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
168,6 → 174,7
if (conflict == connector)
continue;
 
radeon_conflict = to_radeon_connector(conflict);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (conflict->encoder_ids[i] == 0)
break;
177,14 → 184,17
if (conflict->status != connector_status_connected)
continue;
 
if (radeon_conflict->use_digital)
continue;
 
if (priority == true) {
DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
conflict->status = connector_status_disconnected;
radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
} else {
DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
current_status = connector_status_disconnected;
}
break;
209,7 → 219,7
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
 
DRM_DEBUG("Adding native panel mode %s\n", mode->name);
DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
} else if (native_mode->hdisplay != 0 &&
native_mode->vdisplay != 0) {
/* mac laptops without an edid */
221,7 → 231,7
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
return mode;
}
287,6 → 297,7
 
if (property == rdev->mode_info.coherent_mode_property) {
struct radeon_encoder_atom_dig *dig;
bool new_coherent_mode;
 
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
299,10 → 310,55
return 0;
 
dig = radeon_encoder->enc_priv;
dig->coherent_mode = val ? true : false;
new_coherent_mode = val ? true : false;
if (dig->coherent_mode != new_coherent_mode) {
dig->coherent_mode = new_coherent_mode;
radeon_property_change_mode(&radeon_encoder->base);
}
}
 
if (property == rdev->mode_info.underscan_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
 
radeon_encoder = to_radeon_encoder(encoder);
 
if (radeon_encoder->underscan_type != val) {
radeon_encoder->underscan_type = val;
radeon_property_change_mode(&radeon_encoder->base);
}
}
 
if (property == rdev->mode_info.underscan_hborder_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
 
radeon_encoder = to_radeon_encoder(encoder);
 
if (radeon_encoder->underscan_hborder != val) {
radeon_encoder->underscan_hborder = val;
radeon_property_change_mode(&radeon_encoder->base);
}
}
 
if (property == rdev->mode_info.underscan_vborder_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
 
radeon_encoder = to_radeon_encoder(encoder);
 
if (radeon_encoder->underscan_vborder != val) {
radeon_encoder->underscan_vborder = val;
radeon_property_change_mode(&radeon_encoder->base);
}
}
 
if (property == rdev->mode_info.tv_std_property) {
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
if (!encoder) {
315,7 → 371,7
radeon_encoder = to_radeon_encoder(encoder);
if (!radeon_encoder->enc_priv)
return 0;
if (rdev->is_atom_bios) {
if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
struct radeon_encoder_atom_dac *dac_int;
dac_int = radeon_encoder->enc_priv;
dac_int->tv_std = val;
381,13 → 437,13
mode->vdisplay == native_mode->vdisplay) {
*native_mode = *mode;
drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
DRM_INFO("Determined LVDS native mode details from EDID\n");
DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
break;
}
}
}
if (!native_mode->clock) {
DRM_INFO("No LVDS native mode details, disabling RMX\n");
DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
}
}
421,6 → 477,9
if (mode) {
ret = 1;
drm_mode_probed_add(connector, mode);
/* add the width/height from vbios tables if available */
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
458,7 → 517,8
return MODE_OK;
}
 
static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
static enum drm_connector_status
radeon_lvds_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
495,8 → 555,6
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
if (radeon_connector->edid)
kfree(radeon_connector->edid);
kfree(radeon_connector->con_priv);
513,7 → 571,7
struct radeon_encoder *radeon_encoder;
enum radeon_rmx_type rmx_type;
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
if (property != dev->mode_config.scaling_mode_property)
return 0;
 
568,13 → 626,22
static int radeon_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* XXX check mode bandwidth */
/* XXX verify against max DAC output frequency */
 
if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
return MODE_CLOCK_HIGH;
 
return MODE_OK;
}
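/* Editor's sketch, not driver code: the "/ 10" above assumes
 * mode->clock is in kHz while the BIOS-reported max_pixel_clock is in
 * 10 kHz units, so both sides of the comparison end up on the same
 * scale. A standalone equivalent under those assumptions:
 */
static inline int sketch_pixel_clock_ok(int mode_clock_khz,
					unsigned int max_pixel_clock_10khz)
{
	/* convert kHz to 10 kHz units before comparing */
	return (mode_clock_khz / 10) <= (int)max_pixel_clock_10khz;
}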
 
static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
static enum drm_connector_status
radeon_vga_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
612,6 → 679,11
ret = connector_status_connected;
}
} else {
 
/* if we aren't forcing, don't do destructive polling */
if (!force)
return connector->status;
 
if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
620,6 → 692,17
 
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
 
/* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
* vbios to deal with KVMs. If we have one and are not able to detect a monitor
* by other means, assume the CRT is connected and use that EDID.
*/
if ((!rdev->is_atom_bios) &&
(ret == connector_status_disconnected) &&
rdev->mode_info.bios_hardcoded_edid_size) {
ret = connector_status_connected;
}
 
radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
670,7 → 753,8
return MODE_OK;
}
 
static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector)
static enum drm_connector_status
radeon_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
727,8 → 811,11
* we have to check if this analog encoder is shared with anyone else (TV);
* if it's shared we have to set the other connector to disconnected.
*/
static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
static enum drm_connector_status
radeon_dvi_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
struct drm_encoder_helper_funcs *encoder_funcs;
762,14 → 849,12
} else
ret = connector_status_connected;
 
/* multiple connectors on the same encoder with the same ddc line
* This tends to be HDMI and DVI on the same encoder with the
* same ddc line. If the edid says HDMI, consider the HDMI port
* connected and the DVI port disconnected. If the edid doesn't
* say HDMI, vice versa.
/* This gets complicated. We have boards with VGA + HDMI with a
* shared DDC line and we have boards with DVI-D + HDMI with a shared
* DDC line. The latter is more complex because with DVI<->HDMI adapters
* you don't really know what's connected to which port as both are digital.
*/
if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_device *dev = connector->dev;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
776,19 → 861,17
if (connector == list_connector)
continue;
list_radeon_connector = to_radeon_connector(list_connector);
if (radeon_connector->devices == list_radeon_connector->devices) {
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
if (list_radeon_connector->shared_ddc &&
(list_radeon_connector->ddc_bus->rec.i2c_id ==
radeon_connector->ddc_bus->rec.i2c_id)) {
/* cases where both connectors are digital */
if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
/* hpd is our only option in this case */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
ret = connector_status_disconnected;
}
} else {
if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
(connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
ret = connector_status_disconnected;
}
}
}
795,11 → 878,15
}
}
}
}
 
if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
goto out;
 
if (!force) {
ret = connector->status;
goto out;
}
 
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
832,6 → 919,19
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
}
 
/* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
* vbios to deal with KVMs. If we have one and are not able to detect a monitor
* by other means, assume the DFP is connected and use that EDID. In most
* cases the DVI port is actually a virtual KVM port connected to the service
* processor.
*/
if ((!rdev->is_atom_bios) &&
(ret == connector_status_disconnected) &&
rdev->mode_info.bios_hardcoded_edid_size) {
radeon_connector->use_digital = true;
ret = connector_status_connected;
}
 
out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
909,9 → 1009,23
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
if (ASIC_IS_DCE3(rdev)) {
/* HDMI 1.3+ supports a max TMDS clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
else
return MODE_OK;
} else
return MODE_CLOCK_HIGH;
} else
return MODE_CLOCK_HIGH;
}
 
/* check against the max pixel clock */
if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
return MODE_CLOCK_HIGH;
 
return MODE_OK;
}
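/* Editor's sketch of the high-clock policy above (not driver code):
 * past the single-link limit, dual-link DVI and HDMI type B pass
 * through here unconditionally, HDMI type A passes only on DCE3+
 * blocks up to the HDMI 1.3 limit of 340 MHz (mode->clock is in kHz),
 * and everything else is rejected.
 */
static inline int sketch_high_clock_ok(int clock_khz, int dual_link,
				       int hdmi_type_a, int is_dce3)
{
	if (dual_link)
		return 1;			/* no extra limit checked here */
	if (hdmi_type_a && is_dce3)
		return clock_khz <= 340000;	/* HDMI 1.3+ TMDS limit */
	return 0;				/* MODE_CLOCK_HIGH */
}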
 
935,12 → 1049,10
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
 
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
950,18 → 1062,131
static int radeon_dp_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
int ret;
 
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
struct drm_encoder *encoder;
struct drm_display_mode *mode;
 
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
ret = radeon_ddc_get_modes(radeon_connector);
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
 
if (ret > 0) {
encoder = radeon_best_single_encoder(connector);
if (encoder) {
radeon_fixup_lvds_native_mode(encoder, connector);
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
return ret;
}
 
static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
encoder = radeon_best_single_encoder(connector);
if (!encoder)
return 0;
 
/* we have no EDID modes */
mode = radeon_fp_native_mode(encoder);
if (mode) {
ret = 1;
drm_mode_probed_add(connector, mode);
/* add the width/height from vbios tables if available */
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
} else
ret = radeon_ddc_get_modes(radeon_connector);
 
return ret;
}
 
bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
bool found = false;
 
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
 
obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
 
encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
found = true;
break;
default:
break;
}
}
 
return found;
}
 
bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
bool found = false;
 
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
 
obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
 
encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
}
 
return found;
}
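/* Editor's sketch (not driver code): both helpers above walk the
 * connector's candidate encoders the same way — encoder_ids[] is a
 * zero-terminated array of mode-object ids, each resolved through
 * drm_mode_object_find() before the radeon-specific encoder is
 * examined. The shared shape, with the DRM types elided:
 */
static int sketch_any_encoder_matches(const unsigned int *encoder_ids,
				      int max_encoders,
				      int (*match)(unsigned int id))
{
	int i;

	for (i = 0; i < max_encoders; i++) {
		if (encoder_ids[i] == 0)
			break;			/* zero terminates the list */
		if (match(encoder_ids[i]))
			return 1;
	}
	return 0;
}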
 
bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
 
if (ASIC_IS_DCE5(rdev) &&
(rdev->clock.dp_extclk >= 53900) &&
radeon_connector_encoder_is_hbr2(connector)) {
return true;
}
 
return false;
}
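/* Editor's note, hedged: dp_extclk is assumed to be in 10 kHz units,
 * so the 53900 threshold above corresponds to a 539 MHz external DP
 * reference clock — what the HBR2 (5.4 GHz per lane) link rate of
 * DP 1.2 needs on DCE5 parts, alongside an HBR2-capable encoder.
 */
static inline int sketch_extclk_supports_hbr2(unsigned int dp_extclk_10khz)
{
	return dp_extclk_10khz >= 53900;	/* >= 539 MHz, assumed units */
}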
 
static enum drm_connector_status
radeon_dp_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
u8 sink_type;
 
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
968,20 → 1193,44
radeon_connector->edid = NULL;
}
 
sink_type = radeon_dp_getsinktype(radeon_connector);
if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(sink_type == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_getdpcd(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
 
/* check if panel is valid */
if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
ret = connector_status_connected;
}
/* eDP is always DP */
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
if (radeon_ddc_probe(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
radeon_dp_getdpcd(radeon_connector);
} else {
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
if (radeon_ddc_probe(radeon_connector))
ret = connector_status_connected;
}
}
}
 
radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
 
993,12 → 1242,39
 
/* XXX check mode bandwidth */
 
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
 
if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
return MODE_PANEL;
 
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
 
/* AVIVO hardware supports downscaling modes larger than the panel
* to the panel size, but I'm not sure this is desirable.
*/
if ((mode->hdisplay > native_mode->hdisplay) ||
(mode->vdisplay > native_mode->vdisplay))
return MODE_PANEL;
 
/* if scaling is disabled, block non-native modes */
if (radeon_encoder->rmx_type == RMX_OFF) {
if ((mode->hdisplay != native_mode->hdisplay) ||
(mode->vdisplay != native_mode->vdisplay))
return MODE_PANEL;
}
}
return MODE_OK;
} else {
if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return radeon_dp_mode_valid_helper(radeon_connector, mode);
return radeon_dp_mode_valid_helper(connector, mode);
else
return MODE_OK;
}
}
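/* Editor's sketch of the eDP branch above (not driver code): reject
 * implausibly small modes, reject anything larger than the panel's
 * native timing, and with the scaler off (RMX_OFF) accept only the
 * exact native mode.
 */
static inline int sketch_edp_mode_fits(int hdisp, int vdisp,
				       int native_h, int native_v,
				       int rmx_off)
{
	if (hdisp < 320 || vdisp < 240)
		return 0;	/* MODE_PANEL: below minimum panel size */
	if (hdisp > native_h || vdisp > native_v)
		return 0;	/* MODE_PANEL: larger than the panel */
	if (rmx_off && (hdisp != native_h || vdisp != native_v))
		return 0;	/* MODE_PANEL: scaler disabled, native only */
	return 1;
}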
 
struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
.get_modes = radeon_dp_get_modes,
1021,23 → 1297,31
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb,
uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd)
struct radeon_hpd *hpd,
struct radeon_router *router)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *radeon_dig_connector;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
int ret;
bool is_dp_bridge = false;
 
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
 
/* if the user selected tv=0 don't try and add the connector */
if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
(connector_type == DRM_MODE_CONNECTOR_Composite) ||
(connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
(radeon_tv == 0))
return;
 
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
1050,9 → 1334,29
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
if (radeon_connector->router_bus && router->ddc_valid &&
(radeon_connector->router.router_id == router->router_id)) {
radeon_connector->shared_ddc = false;
shared_ddc = false;
}
}
}
 
/* check if it's a dp bridge */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & supported_device) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
is_dp_bridge = true;
break;
default:
break;
}
}
}
 
radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
if (!radeon_connector)
return;
1064,36 → 1368,107
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
radeon_connector->router = *router;
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
if (!radeon_connector->router_bus)
DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
}
 
if (is_dp_bridge) {
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
else
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_vborder_property,
0);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
}
} else {
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
1100,22 → 1475,30
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_vborder_property,
0);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
1122,6 → 1505,11
rdev->mode_info.load_detect_property,
1);
}
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_DVII)
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
1128,63 → 1516,101
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_vborder_property,
0);
}
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
else
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
goto failed;
if (connector_type == DRM_MODE_CONNECTOR_eDP)
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
else
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_vborder_property,
0);
}
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1192,38 → 1618,45
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
radeon_atombios_get_tv_info(rdev));
}
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
}
}
 
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
if (i2c_bus->valid)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
 
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
 
failed:
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
}
1241,12 → 1674,17
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
int ret;
 
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
 
/* if the user selected tv=0 don't try and add the connector */
if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
(connector_type == DRM_MODE_CONNECTOR_Composite) ||
(connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
(radeon_tv == 0))
return;
 
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
1269,44 → 1707,49
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1313,15 → 1756,17
1);
}
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_DVII)
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
/* RS400,RC410,RS480 chipsets seem to report a lot
* of false positives on load detect; we haven't yet
1336,32 → 1781,44
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
radeon_combios_get_tv_info(rdev));
}
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (ret)
goto failed;
drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
}
 
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
if (i2c_bus->valid)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
struct drm_encoder *drm_encoder;
 
failed:
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_encoder;
 
radeon_encoder = to_radeon_encoder(drm_encoder);
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
radeon_legacy_backlight_init(radeon_encoder, connector);
}
}
}
/drivers/video/drm/radeon/radeon_device.c
32,7 → 32,6
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "display.h"
 
53,8 → 52,12
int radeon_new_pll = -1;
int radeon_dynpm = -1;
int radeon_audio = 1;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_disp_priority = 0;
 
 
 
extern display_t *rdisplay;
 
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
76,6 → 79,58
#define VGA_RSRC_NORMAL_MEM 0x08
 
 
static const char radeon_family_name[][16] = {
"R100",
"RV100",
"RS100",
"RV200",
"RS200",
"R200",
"RV250",
"RS300",
"RV280",
"R300",
"R350",
"RV350",
"RV380",
"R420",
"R423",
"RV410",
"RS400",
"RS480",
"RS600",
"RS690",
"RS740",
"RV515",
"R520",
"RV530",
"RV560",
"RV570",
"R580",
"R600",
"RV610",
"RV630",
"RV670",
"RV620",
"RV635",
"RS780",
"RS880",
"RV770",
"RV730",
"RV710",
"RV740",
"CEDAR",
"REDWOOD",
"JUNIPER",
"CYPRESS",
"HEMLOCK",
"PALM",
"BARTS",
"TURKS",
"CAICOS",
"CAYMAN",
"LAST",
};
 
/*
* Clear GPU surface registers.
107,9 → 162,10
} else {
rdev->scratch.num_reg = 7;
}
rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true;
rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
}
}
 
161,7 → 217,7
* Note: GTT start, end, size should be initialized before calling this
* function on AGP platform.
*
* Note: We don't explictly enforce VRAM start to be aligned on VRAM size,
* Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
* this shouldn't be a problem as we are using the PCI aperture as a reference.
* Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
* not IGP.
189,13 → 245,13
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
}
216,23 → 272,23
{
u64 size_af, size_bf;
 
size_af = 0xFFFFFFFF - mc->vram_end;
size_bf = mc->vram_start;
size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
size_bf = mc->vram_start & ~mc->gtt_base_align;
if (size_bf > size_af) {
if (mc->gtt_size > size_bf) {
dev_warn(rdev->dev, "limiting GTT\n");
mc->gtt_size = size_bf;
}
mc->gtt_start = mc->vram_start - mc->gtt_size;
mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
} else {
if (mc->gtt_size > size_af) {
dev_warn(rdev->dev, "limiting GTT\n");
mc->gtt_size = size_af;
}
mc->gtt_start = mc->vram_end + 1;
mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
}
mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
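/* Editor's worked example (assumption: gtt_base_align holds
 * alignment - 1, which is what the "& ~gtt_base_align" masking above
 * implies). Placing a 512 MB GTT above VRAM that ends at 0x1FFFFFFF,
 * with the GTT required to sit on a 512 MB boundary:
 *
 *   gtt_start = (0x1FFFFFFF + 1 + 0x1FFFFFFF) & ~0x1FFFFFFFULL
 *             = 0x20000000, the next 512 MB boundary
 */
static inline unsigned long long sketch_align_up(unsigned long long addr,
						 unsigned long long mask)
{
	return (addr + mask) & ~mask;	/* mask == alignment - 1 */
}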
 
244,8 → 300,13
uint32_t reg;
 
/* first check CRTCs */
if (ASIC_IS_DCE4(rdev)) {
if (ASIC_IS_DCE41(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
if (reg & EVERGREEN_CRTC_MASTER_EN)
return true;
} else if (ASIC_IS_DCE4(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
280,6 → 341,26
 
}
 
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
u32 sclk = rdev->pm.current_sclk;
u32 mclk = rdev->pm.current_mclk;
 
/* sclk/mclk in MHz */
a.full = dfixed_const(100);
rdev->pm.sclk.full = dfixed_const(sclk);
rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
rdev->pm.mclk.full = dfixed_const(mclk);
rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 
if (rdev->flags & RADEON_IS_IGP) {
a.full = dfixed_const(16);
/* core_bandwidth = sclk(MHz) * 16 */
rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
}
}
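/* Editor's sketch of the conversion above (assumptions: fixed20_12 is
 * 20.12 fixed point, dfixed_const(x) is x << 12, and current_sclk /
 * current_mclk are in 10 kHz units, so dividing by 100 yields MHz).
 * A plain-integer equivalent:
 */
static inline unsigned int sketch_mhz_fixed_from_10khz(unsigned int clk_10khz)
{
	/* e.g. 40000 (10 kHz units) -> 400 MHz -> 400 << 12 */
	return (clk_10khz << 12) / 100;
}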
 
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
if (radeon_card_posted(rdev))
323,181 → 404,6
}
 
 
/*
* Registers accessors functions.
*/
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
BUG_ON(1);
return 0;
}
 
void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
reg, v);
BUG_ON(1);
}
 
void radeon_register_accessor_init(struct radeon_device *rdev)
{
rdev->mc_rreg = &radeon_invalid_rreg;
rdev->mc_wreg = &radeon_invalid_wreg;
rdev->pll_rreg = &radeon_invalid_rreg;
rdev->pll_wreg = &radeon_invalid_wreg;
rdev->pciep_rreg = &radeon_invalid_rreg;
rdev->pciep_wreg = &radeon_invalid_wreg;
 
/* Don't change order as we are overriding accessors. */
if (rdev->family < CHIP_RV515) {
rdev->pcie_reg_mask = 0xff;
} else {
rdev->pcie_reg_mask = 0x7ff;
}
/* FIXME: not sure here */
if (rdev->family <= CHIP_R580) {
rdev->pll_rreg = &r100_pll_rreg;
rdev->pll_wreg = &r100_pll_wreg;
}
if (rdev->family >= CHIP_R420) {
rdev->mc_rreg = &r420_mc_rreg;
rdev->mc_wreg = &r420_mc_wreg;
}
if (rdev->family >= CHIP_RV515) {
rdev->mc_rreg = &rv515_mc_rreg;
rdev->mc_wreg = &rv515_mc_wreg;
}
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
rdev->mc_rreg = &rs400_mc_rreg;
rdev->mc_wreg = &rs400_mc_wreg;
}
if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
rdev->mc_rreg = &rs690_mc_rreg;
rdev->mc_wreg = &rs690_mc_wreg;
}
if (rdev->family == CHIP_RS600) {
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
}
 
 
/*
* ASIC
*/
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
case CHIP_RS100:
case CHIP_RV200:
case CHIP_RS200:
rdev->asic = &r100_asic;
break;
case CHIP_R200:
case CHIP_RV250:
case CHIP_RS300:
case CHIP_RV280:
rdev->asic = &r200_asic;
break;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
case CHIP_RV380:
if (rdev->flags & RADEON_IS_PCIE)
rdev->asic = &r300_asic_pcie;
else
rdev->asic = &r300_asic;
break;
case CHIP_R420:
case CHIP_R423:
case CHIP_RV410:
rdev->asic = &r420_asic;
break;
case CHIP_RS400:
case CHIP_RS480:
rdev->asic = &rs400_asic;
break;
case CHIP_RS600:
rdev->asic = &rs600_asic;
break;
case CHIP_RS690:
case CHIP_RS740:
rdev->asic = &rs690_asic;
break;
case CHIP_RV515:
rdev->asic = &rv515_asic;
break;
case CHIP_R520:
case CHIP_RV530:
case CHIP_RV560:
case CHIP_RV570:
case CHIP_R580:
rdev->asic = &r520_asic;
break;
case CHIP_R600:
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV620:
case CHIP_RV635:
case CHIP_RV670:
case CHIP_RS780:
case CHIP_RS880:
rdev->asic = &r600_asic;
break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
rdev->asic = &rv770_asic;
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_JUNIPER:
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
rdev->asic = &evergreen_asic;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
 
if (rdev->flags & RADEON_IS_IGP) {
rdev->asic->get_memory_clock = NULL;
rdev->asic->set_memory_clock = NULL;
}
 
return 0;
}
 
 
/*
* Wrapper around modesetting bits.
*/
int radeon_clocks_init(struct radeon_device *rdev)
{
int r;
 
r = radeon_static_clocks_init(rdev->ddev);
if (r) {
return r;
}
DRM_INFO("Clocks initialized !\n");
return 0;
}
 
void radeon_clocks_fini(struct radeon_device *rdev)
{
}
 
/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
547,6 → 453,22
return r;
}
 
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
 
WREG32_IO(reg*4, val);
}
 
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
uint32_t r;
 
r = RREG32_IO(reg*4);
return r;
}
 
int radeon_atombios_init(struct radeon_device *rdev)
{
struct card_info *atom_card_info =
559,6 → 481,15
atom_card_info->dev = rdev->ddev;
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
/* needed for iio ops */
if (rdev->rio_mem) {
atom_card_info->ioreg_read = cail_ioreg_read;
atom_card_info->ioreg_write = cail_ioreg_write;
} else {
DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
atom_card_info->ioreg_read = cail_reg_read;
atom_card_info->ioreg_write = cail_reg_write;
}
atom_card_info->mc_read = cail_mc_read;
atom_card_info->mc_write = cail_mc_write;
atom_card_info->pll_read = cail_pll_read;
602,29 → 533,6
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
 
void radeon_agp_disable(struct radeon_device *rdev)
{
rdev->flags &= ~RADEON_IS_AGP;
if (rdev->family >= CHIP_R600) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
} else if (rdev->family >= CHIP_RV515 ||
rdev->family == CHIP_RV380 ||
rdev->family == CHIP_RV410 ||
rdev->family == CHIP_R423) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
 
void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
696,10 → 604,9
struct pci_dev *pdev,
uint32_t flags)
{
int r;
int r, i;
int dma_bits;
 
DRM_INFO("radeon: Initializing kernel modesetting.\n");
rdev->shutdown = false;
rdev->ddev = ddev;
rdev->pdev = pdev;
710,6 → 617,10
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->gpu_lockup = false;
rdev->accel_working = false;
 
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device);
 
/* mutex initialization are all done here so we
* can recall function without having locking issues */
mutex_init(&rdev->cs_mutex);
716,9 → 627,13
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
// if (rdev->family >= CHIP_R600)
// spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
// rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
 
/* Set asic functions */
r = radeon_asic_init(rdev);
726,6 → 641,14
return r;
radeon_check_arguments(rdev);
 
/* all of the newer IGP chips have an internal gart
* However, some rs4xx report as AGP, so remove that here.
*/
if ((rdev->family >= CHIP_RS400) &&
(rdev->flags & RADEON_IS_IGP)) {
rdev->flags &= ~RADEON_IS_AGP;
}
 
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
}
751,7 → 674,6
/* Registers mapping */
/* TODO: block userspace mapping of io register */
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
 
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
 
rdev->rmmio = (void*)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
771,7 → 693,7
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
*/
radeon_gpu_reset(rdev);
radeon_asic_reset(rdev);
radeon_fini(rdev);
radeon_agp_disable(rdev);
r = radeon_init(rdev);
874,7 → 796,6
INIT_LIST_HEAD(&dev->maplist);
 
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
 
883,9 → 804,9
if (ret)
goto err_g4;
 
if( radeon_modeset )
init_display_kms(dev->dev_private, &usermode);
else
// if( radeon_modeset )
// init_display_kms(dev->dev_private, &usermode);
// else
init_display(dev->dev_private, &usermode);
 
LEAVE();
1013,12 → 934,7
static char log[256];
static pci_dev_t device;
 
u32_t
#if defined(__GNUC__) && __GNUC__ >= 4
// has sense only if -fwhole-program is used, like Makefile.lto
__attribute__((externally_visible))
#endif
drvEntry(int action, char *cmdline)
u32_t drvEntry(int action, char *cmdline)
{
struct radeon_device *rdev = NULL;
 
1038,7 → 954,7
 
if(!dbg_open(log))
{
strcpy(log, "/rd/1/drivers/atikms.log");
strcpy(log, "/hd2/1/atikms.log");
 
if(!dbg_open(log))
{
1046,9 → 962,10
return 0;
};
}
dbgprintf("Radeon RC10 cmdline %s\n", cmdline);
dbgprintf("Radeon RC11 cmdline %s\n", cmdline);
 
enum_pci_devices();
 
ent = find_pci_device(&device, pciidlist);
 
if( unlikely(ent == NULL) )
1064,11 → 981,11
 
rdev = rdisplay->ddev->dev_private;
 
if( (rdev->asic == &r600_asic) ||
(rdev->asic == &rv770_asic))
r600_2D_test(rdev);
else if (rdev->asic != &evergreen_asic)
r100_2D_test(rdev);
// if( (rdev->asic == &r600_asic) ||
// (rdev->asic == &rv770_asic))
// r600_2D_test(rdev);
// else if (rdev->asic != &evergreen_asic)
// r100_2D_test(rdev);
 
err = RegService("DISPLAY", display_handler);
 
/drivers/video/drm/radeon/radeon_display.c
28,7 → 28,7
#include "radeon.h"
 
#include "atom.h"
//#include <asm/div64.h>
#include <asm/div64.h>
 
#include "drm_crtc_helper.h"
#include "drm_edid.h"
42,7 → 42,7
struct radeon_device *rdev = dev->dev_private;
int i;
 
DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
 
WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
68,7 → 68,7
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
 
static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
75,7 → 75,7
struct radeon_device *rdev = dev->dev_private;
int i;
 
DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
 
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
86,12 → 86,12
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
 
WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
 
WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
for (i = 0; i < 256; i++) {
WREG32(EVERGREEN_DC_LUT_30_COLOR,
WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
(radeon_crtc->lut_r[i] << 20) |
(radeon_crtc->lut_g[i] << 10) |
(radeon_crtc->lut_b[i] << 0));
98,6 → 98,66
}
}
 
static void dce5_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int i;
 
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
 
WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
(NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
NI_GRPH_PRESCALE_BYPASS);
WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
NI_OVL_PRESCALE_BYPASS);
WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
(NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
 
WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
 
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
 
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
 
WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
 
WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
for (i = 0; i < 256; i++) {
WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
(radeon_crtc->lut_r[i] << 20) |
(radeon_crtc->lut_g[i] << 10) |
(radeon_crtc->lut_b[i] << 0));
}
 
WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
(NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
(NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
(NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
(NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
 
}
 
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
130,8 → 190,10
if (!crtc->enabled)
return;
 
if (ASIC_IS_DCE4(rdev))
evergreen_crtc_load_lut(crtc);
if (ASIC_IS_DCE5(rdev))
dce5_crtc_load_lut(crtc);
else if (ASIC_IS_DCE4(rdev))
dce4_crtc_load_lut(crtc);
else if (ASIC_IS_AVIVO(rdev))
avivo_crtc_load_lut(crtc);
else
161,17 → 223,13
}
 
static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size)
u16 *blue, uint32_t start, uint32_t size)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
int i;
int end = (start + size > 256) ? 256 : start + size, i;
 
if (size != 256) {
return;
}
 
/* userspace palettes are always correct as is */
for (i = 0; i < 256; i++) {
for (i = start; i < end; i++) {
radeon_crtc->lut_r[i] = red[i] >> 6;
radeon_crtc->lut_g[i] = green[i] >> 6;
radeon_crtc->lut_b[i] = blue[i] >> 6;
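/* Editor's sketch (not driver code): userspace supplies 16-bit gamma
 * components, the hardware LUT above keeps 10 bits each — hence the
 * ">> 6" — and the new start/size interface is clamped so a partial
 * update can never run past the 256-entry table:
 */
static inline unsigned int sketch_lut_end(unsigned int start,
					  unsigned int size)
{
	return (start + size > 256) ? 256 : start + size;
}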
193,6 → 251,7
.gamma_set = radeon_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = radeon_crtc_destroy,
.page_flip = NULL,
};
 
static void radeon_crtc_init(struct drm_device *dev, int index)
229,7 → 288,7
radeon_legacy_init_crtc(dev, radeon_crtc);
}
 
static const char *encoder_names[34] = {
static const char *encoder_names[36] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
264,6 → 323,8
"INTERNAL_KLDSCP_LVTMA",
"INTERNAL_UNIPHY1",
"INTERNAL_UNIPHY2",
"NUTMEG",
"TRAVIS",
};
 
static const char *connector_names[15] = {
284,8 → 345,7
"eDP",
};
 
static const char *hpd_names[7] = {
"NONE",
static const char *hpd_names[6] = {
"HPD1",
"HPD2",
"HPD3",
320,6 → 380,14
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
if (radeon_connector->router.ddc_valid)
DRM_INFO(" DDC Router 0x%x/0x%x\n",
radeon_connector->router.ddc_mux_control_pin,
radeon_connector->router.ddc_mux_state);
if (radeon_connector->router.cd_valid)
DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
radeon_connector->router.cd_mux_control_pin,
radeon_connector->router.cd_mux_state);
} else {
if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
350,6 → 418,8
DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP5_SUPPORT)
DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP6_SUPPORT)
DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_TV1_SUPPORT)
DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_CV_SUPPORT)
368,10 → 438,9
 
if (rdev->bios) {
if (rdev->is_atom_bios) {
if (rdev->family >= CHIP_R600)
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
if (ret == false)
ret = radeon_get_atom_connector_info_from_object_table(dev);
else
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
} else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
if (ret == false)
384,8 → 453,9
if (ret) {
radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
radeon_ddc_dump(drm_connector);
 
// list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
// radeon_ddc_dump(drm_connector);
}
 
return ret;
397,6 → 467,10
struct radeon_device *rdev = dev->dev_private;
int ret = 0;
 
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
 
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
409,9 → 483,17
if (!radeon_connector->edid) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
}
 
if (!radeon_connector->edid) {
if (rdev->is_atom_bios) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
} else
/* some servers provide a hardcoded edid in rom for KVMs */
if (!radeon_connector->edid)
radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
427,6 → 509,10
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret = 0;
 
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
 
if (!radeon_connector->ddc_bus)
return -1;
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
436,6 → 522,125
return ret;
}
 
/* avivo */
static void avivo_get_fb_div(struct radeon_pll *pll,
u32 target_clock,
u32 post_div,
u32 ref_div,
u32 *fb_div,
u32 *frac_fb_div)
{
u32 tmp = post_div * ref_div;
 
tmp *= target_clock;
*fb_div = tmp / pll->reference_freq;
*frac_fb_div = tmp % pll->reference_freq;
 
if (*fb_div > pll->max_feedback_div)
*fb_div = pll->max_feedback_div;
else if (*fb_div < pll->min_feedback_div)
*fb_div = pll->min_feedback_div;
}
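/* Worked example with assumed numbers: for target_clock = 108000,
* post_div = 4, ref_div = 2 and reference_freq = 27000,
* tmp = 4 * 2 * 108000 = 864000, so *fb_div = 864000 / 27000 = 32 and
* *frac_fb_div = 0; the VCO then runs at 27000 * 32 / 2 = 432000,
* which is exactly target_clock * post_div, before the feedback
* divider is clamped to the [min_feedback_div, max_feedback_div] range.
*/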
 
static u32 avivo_get_post_div(struct radeon_pll *pll,
u32 target_clock)
{
u32 vco, post_div, tmp;
 
if (pll->flags & RADEON_PLL_USE_POST_DIV)
return pll->post_div;
 
if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
if (pll->flags & RADEON_PLL_IS_LCD)
vco = pll->lcd_pll_out_min;
else
vco = pll->pll_out_min;
} else {
if (pll->flags & RADEON_PLL_IS_LCD)
vco = pll->lcd_pll_out_max;
else
vco = pll->pll_out_max;
}
 
post_div = vco / target_clock;
tmp = vco % target_clock;
 
if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
if (tmp)
post_div++;
} else {
if (!tmp)
post_div--;
}
 
if (post_div > pll->max_post_div)
post_div = pll->max_post_div;
else if (post_div < pll->min_post_div)
post_div = pll->min_post_div;
 
return post_div;
}
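/* Worked example with assumed limits: with pll_out_max = 1200000 and
* target_clock = 108000 (and PREFER_MINM_OVER_MAXP unset), vco is taken
* as 1200000, giving post_div = 1200000 / 108000 = 11 with a non-zero
* remainder, so post_div stays at 11 before being clamped to
* [min_post_div, max_post_div]; the VCO is thus kept as close to the
* allowed maximum as the integer divider permits.
*/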
 
#define MAX_TOLERANCE 10
 
void radeon_compute_pll_avivo(struct radeon_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,
u32 *frac_fb_div_p,
u32 *ref_div_p,
u32 *post_div_p)
{
u32 target_clock = freq / 10;
u32 post_div = avivo_get_post_div(pll, target_clock);
u32 ref_div = pll->min_ref_div;
u32 fb_div = 0, frac_fb_div = 0, tmp;
 
if (pll->flags & RADEON_PLL_USE_REF_DIV)
ref_div = pll->reference_div;
 
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
if (frac_fb_div >= 5) {
frac_fb_div -= 5;
frac_fb_div = frac_fb_div / 10;
frac_fb_div++;
}
if (frac_fb_div >= 10) {
fb_div++;
frac_fb_div = 0;
}
} else {
while (ref_div <= pll->max_ref_div) {
avivo_get_fb_div(pll, target_clock, post_div, ref_div,
&fb_div, &frac_fb_div);
if (frac_fb_div >= (pll->reference_freq / 2))
fb_div++;
frac_fb_div = 0;
tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
tmp = (tmp * 10000) / target_clock;
 
if (tmp > (10000 + MAX_TOLERANCE))
ref_div++;
else if (tmp >= (10000 - MAX_TOLERANCE))
break;
else
ref_div++;
}
}
 
*dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
(ref_div * post_div * 10);
*fb_div_p = fb_div;
*frac_fb_div_p = frac_fb_div;
*ref_div_p = ref_div;
*post_div_p = post_div;
DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
*dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
}
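/* Numerical check of the dot clock formula above, using assumed divider
* values: reference_freq = 27000, fb_div = 60, frac_fb_div = 5,
* ref_div = 1, post_div = 10 gives
* (27000 * 60 * 10 + 27000 * 5) / (1 * 10 * 10) = 16335000 / 100
* = 163350, i.e. the same result as an effective fractional feedback
* divider of 60.5 (27000 * 60.5 / (1 * 10) = 163350).
*/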
 
/* pre-avivo */
static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
uint64_t mod;
446,7 → 651,7
return n;
}
 
static void radeon_compute_pll_legacy(struct radeon_pll *pll,
void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
469,10 → 674,22
uint32_t best_error = 0xffffffff;
uint32_t best_vco_diff = 1;
uint32_t post_div;
u32 pll_out_min, pll_out_max;
 
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
 
if (pll->flags & RADEON_PLL_IS_LCD) {
pll_out_min = pll->lcd_pll_out_min;
pll_out_max = pll->lcd_pll_out_max;
} else {
pll_out_min = pll->pll_out_min;
pll_out_max = pll->pll_out_max;
}
 
if (pll_out_min > 64800)
pll_out_min = 64800;
 
if (pll->flags & RADEON_PLL_USE_REF_DIV)
min_ref_div = max_ref_div = pll->reference_div;
else {
496,7 → 713,7
max_fractional_feed_div = pll->max_frac_feedback_div;
}
 
for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
uint32_t ref_div;
 
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
536,10 → 753,10
tmp = (uint64_t)pll->reference_freq * feedback_div;
vco = radeon_div(tmp, ref_div);
 
if (vco < pll->pll_out_min) {
if (vco < pll_out_min) {
min_feed_div = feedback_div + 1;
continue;
} else if (vco > pll->pll_out_max) {
} else if (vco > pll_out_max) {
max_feed_div = feedback_div;
continue;
}
551,8 → 768,10
current_freq = radeon_div(tmp, ref_div * post_div);
 
if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
if (freq < current_freq)
error = 0xffffffff;
else
error = freq - current_freq;
error = error < 0 ? 0xffffffff : error;
} else
error = abs(current_freq - freq);
vco_diff = abs(vco - best_vco);
559,7 → 778,7
 
if ((best_vco == 0 && error < best_error) ||
(best_vco != 0 &&
(error < best_error - 100 ||
((best_error > 100 && error < best_error - 100) ||
(abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
best_post_div = post_div;
best_ref_div = ref_div;
610,207 → 829,17
*frac_fb_div_p = best_frac_feedback_div;
*ref_div_p = best_ref_div;
*post_div_p = best_post_div;
}
DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
(long long)freq,
best_freq / 1000, best_feedback_div, best_frac_feedback_div,
best_ref_div, best_post_div);
 
static bool
calc_fb_div(struct radeon_pll *pll,
uint32_t freq,
uint32_t post_div,
uint32_t ref_div,
uint32_t *fb_div,
uint32_t *fb_div_frac)
{
fixed20_12 feedback_divider, a, b;
u32 vco_freq;
 
vco_freq = freq * post_div;
/* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
a.full = rfixed_const(pll->reference_freq);
feedback_divider.full = rfixed_const(vco_freq);
feedback_divider.full = rfixed_div(feedback_divider, a);
a.full = rfixed_const(ref_div);
feedback_divider.full = rfixed_mul(feedback_divider, a);
 
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
/* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
a.full = rfixed_const(10);
feedback_divider.full = rfixed_mul(feedback_divider, a);
feedback_divider.full += rfixed_const_half(0);
feedback_divider.full = rfixed_floor(feedback_divider);
feedback_divider.full = rfixed_div(feedback_divider, a);
 
/* *fb_div = floor(feedback_divider); */
a.full = rfixed_floor(feedback_divider);
*fb_div = rfixed_trunc(a);
/* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
a.full = rfixed_const(10);
b.full = rfixed_mul(feedback_divider, a);
 
feedback_divider.full = rfixed_floor(feedback_divider);
feedback_divider.full = rfixed_mul(feedback_divider, a);
feedback_divider.full = b.full - feedback_divider.full;
*fb_div_frac = rfixed_trunc(feedback_divider);
} else {
/* *fb_div = floor(feedback_divider + 0.5); */
feedback_divider.full += rfixed_const_half(0);
feedback_divider.full = rfixed_floor(feedback_divider);
 
*fb_div = rfixed_trunc(feedback_divider);
*fb_div_frac = 0;
}
 
if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
return false;
else
return true;
}
 
static bool
calc_fb_ref_div(struct radeon_pll *pll,
uint32_t freq,
uint32_t post_div,
uint32_t *fb_div,
uint32_t *fb_div_frac,
uint32_t *ref_div)
{
fixed20_12 ffreq, max_error, error, pll_out, a;
u32 vco;
 
ffreq.full = rfixed_const(freq);
/* max_error = ffreq * 0.0025; */
a.full = rfixed_const(400);
max_error.full = rfixed_div(ffreq, a);
 
for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
vco = vco / ((*ref_div) * 10);
 
if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
continue;
 
/* pll_out = vco / post_div; */
a.full = rfixed_const(post_div);
pll_out.full = rfixed_const(vco);
pll_out.full = rfixed_div(pll_out, a);
 
if (pll_out.full >= ffreq.full) {
error.full = pll_out.full - ffreq.full;
if (error.full <= max_error.full)
return true;
}
}
}
return false;
}
 
static void radeon_compute_pll_new(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p)
{
u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
u32 best_freq = 0, vco_frequency;
 
/* freq = freq / 10; */
do_div(freq, 10);
 
if (pll->flags & RADEON_PLL_USE_POST_DIV) {
post_div = pll->post_div;
if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
goto done;
 
vco_frequency = freq * post_div;
if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
goto done;
 
if (pll->flags & RADEON_PLL_USE_REF_DIV) {
ref_div = pll->reference_div;
if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
goto done;
if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
goto done;
}
} else {
for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
if (pll->flags & RADEON_PLL_LEGACY) {
if ((post_div == 5) ||
(post_div == 7) ||
(post_div == 9) ||
(post_div == 10) ||
(post_div == 11))
continue;
}
 
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
continue;
 
vco_frequency = freq * post_div;
if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
continue;
if (pll->flags & RADEON_PLL_USE_REF_DIV) {
ref_div = pll->reference_div;
if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
goto done;
if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
break;
} else {
if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
break;
}
}
}
 
best_freq = pll->reference_freq * 10 * fb_div;
best_freq += pll->reference_freq * fb_div_frac;
best_freq = best_freq / (ref_div * post_div);
 
done:
if (best_freq == 0)
DRM_ERROR("Couldn't find valid PLL dividers\n");
 
*dot_clock_p = best_freq / 10;
*fb_div_p = fb_div;
*frac_fb_div_p = fb_div_frac;
*ref_div_p = ref_div;
*post_div_p = post_div;
 
DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
}
 
void radeon_compute_pll(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p)
{
switch (pll->algo) {
case PLL_ALGO_NEW:
radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
frac_fb_div_p, ref_div_p, post_div_p);
break;
case PLL_ALGO_LEGACY:
default:
radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
frac_fb_div_p, ref_div_p, post_div_p);
break;
}
}
 
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
struct drm_device *dev = fb->dev;
 
if (fb->fbdev)
radeonfb_remove(dev, fb);
 
 
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
830,22 → 859,16
.create_handle = radeon_user_framebuffer_create_handle,
};
 
struct drm_framebuffer *
radeon_framebuffer_create(struct drm_device *dev,
void
radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj)
{
struct radeon_framebuffer *radeon_fb;
 
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
return NULL;
rfb->obj = obj;
drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
}
drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
radeon_fb->obj = obj;
return &radeon_fb->base;
}
 
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
861,9 → 884,10
// return radeon_framebuffer_create(dev, mode_cmd, obj);
}
 
 
static const struct drm_mode_config_funcs radeon_mode_funcs = {
// .fb_create = radeon_user_framebuffer_create,
.fb_changed = radeonfb_probe,
// .output_poll_changed = radeon_output_poll_changed
};
 
struct drm_prop_enum_list {
887,6 → 911,12
{ TV_STD_SECAM, "secam" },
};
 
static struct drm_prop_enum_list radeon_underscan_enum_list[] =
{ { UNDERSCAN_OFF, "off" },
{ UNDERSCAN_ON, "on" },
{ UNDERSCAN_AUTO, "auto" },
};
 
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int i, sz;
940,20 → 970,75
radeon_tv_std_enum_list[i].name);
}
 
sz = ARRAY_SIZE(radeon_underscan_enum_list);
rdev->mode_info.underscan_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_ENUM,
"underscan", sz);
for (i = 0; i < sz; i++) {
drm_property_add_enum(rdev->mode_info.underscan_property,
i,
radeon_underscan_enum_list[i].type,
radeon_underscan_enum_list[i].name);
}
 
rdev->mode_info.underscan_hborder_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"underscan hborder", 2);
if (!rdev->mode_info.underscan_hborder_property)
return -ENOMEM;
rdev->mode_info.underscan_hborder_property->values[0] = 0;
rdev->mode_info.underscan_hborder_property->values[1] = 128;
 
rdev->mode_info.underscan_vborder_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"underscan vborder", 2);
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
rdev->mode_info.underscan_vborder_property->values[0] = 0;
rdev->mode_info.underscan_vborder_property->values[1] = 128;
 
return 0;
}
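/* Pattern sketch (names here are hypothetical, not driver properties):
* any further range property would follow the same shape as the two
* underscan border properties above:
*
*	prop = drm_property_create(rdev->ddev, DRM_MODE_PROP_RANGE,
*				   "example range", 2);
*	if (!prop)
*		return -ENOMEM;
*	prop->values[0] = min_value;
*	prop->values[1] = max_value;
*/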
 
void radeon_update_display_priority(struct radeon_device *rdev)
{
/* adjustment options for the display watermarks */
if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
/* set display priority to high for r3xx, rv515 chips;
* this avoids flickering due to underflow in the
* display controllers during heavy acceleration.
* Don't force high on rs4xx igp chips as it seems to
* affect the sound card. See kernel bug 15982.
*/
if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
!(rdev->flags & RADEON_IS_IGP))
rdev->disp_priority = 2;
else
rdev->disp_priority = 0;
} else
rdev->disp_priority = radeon_disp_priority;
 
}
 
int radeon_modeset_init(struct radeon_device *rdev)
{
int i;
int ret;
 
ENTER();
 
drm_mode_config_init(rdev->ddev);
rdev->mode_info.mode_config_initialized = true;
 
rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
 
if (ASIC_IS_AVIVO(rdev)) {
if (ASIC_IS_DCE5(rdev)) {
rdev->ddev->mode_config.max_width = 16384;
rdev->ddev->mode_config.max_height = 16384;
} else if (ASIC_IS_AVIVO(rdev)) {
rdev->ddev->mode_config.max_width = 8192;
rdev->ddev->mode_config.max_height = 8192;
} else {
968,6 → 1053,9
return ret;
}
 
/* init i2c buses */
radeon_i2c_init(rdev);
 
/* check combios for a valid hardcoded EDID - Sun servers */
if (!rdev->is_atom_bios) {
/* check for hardcoded EDID in BIOS */
974,15 → 1062,6
radeon_combios_check_hardcoded_edid(rdev);
}
 
if (rdev->flags & RADEON_SINGLE_CRTC)
rdev->num_crtc = 1;
else {
if (ASIC_IS_DCE4(rdev))
rdev->num_crtc = 6;
else
rdev->num_crtc = 2;
}
 
/* allocate crtcs */
for (i = 0; i < rdev->num_crtc; i++) {
radeon_crtc_init(rdev->ddev, i);
993,9 → 1072,22
if (!ret) {
return ret;
}
 
/* init dig PHYs */
if (rdev->is_atom_bios)
radeon_atom_encoder_init(rdev);
 
/* initialize hpd */
radeon_hpd_init(rdev);
drm_helper_initial_config(rdev->ddev);
// radeon_hpd_init(rdev);
 
/* Initialize power management */
// radeon_pm_init(rdev);
 
radeon_fbdev_init(rdev);
// drm_kms_helper_poll_init(rdev->ddev);
 
LEAVE();
 
return 0;
}
 
1004,26 → 1096,52
kfree(rdev->mode_info.bios_hardcoded_edid);
 
if (rdev->mode_info.mode_config_initialized) {
radeon_hpd_fini(rdev);
// drm_kms_helper_poll_fini(rdev->ddev);
// radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
/* free i2c buses */
radeon_i2c_fini(rdev);
}
 
static bool is_hdtv_mode(struct drm_display_mode *mode)
{
/* try to guess whether this is a TV or a monitor */
if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
(mode->vdisplay == 576) || /* 576p */
(mode->vdisplay == 720) || /* 720p */
(mode->vdisplay == 1080)) /* 1080p */
return true;
else
return false;
}
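/* For example, a 720x480 mode matches the 480p rule and a 1280x720 mode
* matches the 720p rule, so both are treated as TV modes, while a
* 1280x1024 desktop mode matches none of the cases and is treated as a
* monitor.
*/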
 
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_encoder *radeon_encoder;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
bool first = true;
u32 src_v = 1, dst_v = 1;
u32 src_h = 1, dst_h = 1;
 
radeon_crtc->h_border = 0;
radeon_crtc->v_border = 0;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (encoder->crtc != crtc)
continue;
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
radeon_connector = to_radeon_connector(connector);
 
if (first) {
/* set scaling */
if (radeon_encoder->rmx_type == RMX_OFF)
1037,16 → 1155,42
memcpy(&radeon_crtc->native_mode,
&radeon_encoder->native_mode,
sizeof(struct drm_display_mode));
src_v = crtc->mode.vdisplay;
dst_v = radeon_crtc->native_mode.vdisplay;
src_h = crtc->mode.hdisplay;
dst_h = radeon_crtc->native_mode.hdisplay;
 
/* fix up for overscan on hdmi */
if (ASIC_IS_AVIVO(rdev) &&
(!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
drm_detect_hdmi_monitor(radeon_connector->edid) &&
is_hdtv_mode(mode)))) {
if (radeon_encoder->underscan_hborder != 0)
radeon_crtc->h_border = radeon_encoder->underscan_hborder;
else
radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
if (radeon_encoder->underscan_vborder != 0)
radeon_crtc->v_border = radeon_encoder->underscan_vborder;
else
radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
radeon_crtc->rmx_type = RMX_FULL;
src_v = crtc->mode.vdisplay;
dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
src_h = crtc->mode.hdisplay;
dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
}
first = false;
} else {
if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
/* WARNING: Right now this can't happen but
* in the future we need to check that scaling
* are consistent accross different encoder
* are consistent across different encoder
* (i.e. all encoders can work with the same
* scaling).
*/
DRM_ERROR("Scaling not consistent accross encoder.\n");
DRM_ERROR("Scaling not consistent across encoder.\n");
return false;
}
}
1053,15 → 1197,170
}
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
a.full = rfixed_const(crtc->mode.vdisplay);
b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
radeon_crtc->vsc.full = rfixed_div(a, b);
a.full = rfixed_const(crtc->mode.hdisplay);
b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
radeon_crtc->hsc.full = rfixed_div(a, b);
a.full = dfixed_const(src_v);
b.full = dfixed_const(dst_v);
radeon_crtc->vsc.full = dfixed_div(a, b);
a.full = dfixed_const(src_h);
b.full = dfixed_const(dst_h);
radeon_crtc->hsc.full = dfixed_div(a, b);
} else {
radeon_crtc->vsc.full = rfixed_const(1);
radeon_crtc->hsc.full = rfixed_const(1);
radeon_crtc->vsc.full = dfixed_const(1);
radeon_crtc->hsc.full = dfixed_const(1);
}
return true;
}
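/* Sketch of the fixed-point scale factors computed above, with assumed
* mode sizes: scaling a 1024x768 source to a 1280x800 panel gives
* hsc = 1024 / 1280 = 0.8 and vsc = 768 / 800 = 0.96; in the 20.12
* fixed-point format used here, dfixed_div(dfixed_const(1024),
* dfixed_const(1280)).full is about 0.8 * 4096 ≈ 3277. A scale factor
* below 1.0 therefore means the source is stretched up to the larger
* native mode.
*/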
 
/*
* Retrieve the current video scanout position of a crtc on a given gpu.
*
* \param dev Device to query.
* \param crtc Crtc to query.
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should be stored.
*
* Returns vpos as a positive number while in active scanout area.
* Returns vpos as a negative number inside vblank, counting the number
* of scanlines to go until end of vblank, e.g., -1 means "one scanline
* until start of active scanout / end of vblank."
*
* \return Flags, or'ed together as follows:
*
* DRM_SCANOUTPOS_VALID = Query successful.
* DRM_SCANOUTPOS_INVBL = Inside vblank.
* DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
* this flag means that returned position may be offset by a constant but
* unknown small number of scanlines wrt. real scanout position.
*
*/
int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true;
 
struct radeon_device *rdev = dev->dev_private;
 
if (ASIC_IS_DCE4(rdev)) {
if (crtc == 0) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC0_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC0_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC1_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC1_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 2) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC2_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC2_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 3) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC3_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC3_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 4) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC4_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC4_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 5) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC5_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC5_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
} else if (ASIC_IS_AVIVO(rdev)) {
if (crtc == 0) {
vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
ret |= DRM_SCANOUTPOS_VALID;
}
} else {
/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
if (crtc == 0) {
/* Assume vbl_end == 0, get vbl_start from
* upper 16 bits.
*/
vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
stat_crtc = RREG32(RADEON_CRTC_STATUS);
if (!(stat_crtc & 1))
in_vbl = false;
 
ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
stat_crtc = RREG32(RADEON_CRTC2_STATUS);
if (!(stat_crtc & 1))
in_vbl = false;
 
ret |= DRM_SCANOUTPOS_VALID;
}
}
 
/* Decode into vertical and horizontal scanout position. */
*vpos = position & 0x1fff;
*hpos = (position >> 16) & 0x1fff;
 
/* Valid vblank area boundaries from gpu retrieved? */
if (vbl > 0) {
/* Yes: Decode. */
ret |= DRM_SCANOUTPOS_ACCURATE;
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
}
else {
/* No: Fake something reasonable which gives at least ok results. */
vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
vbl_end = 0;
}
 
/* Test scanout position against vblank region. */
if ((*vpos < vbl_start) && (*vpos >= vbl_end))
in_vbl = false;
 
/* Check if inside vblank area and apply corrective offsets:
* vpos will then be >=0 in video scanout area, but negative
* within vblank area, counting down the number of lines until
* start of scanout.
*/
 
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
*vpos = *vpos - vtotal;
}
 
/* Correct for shifted end of vbl at vbl_end. */
*vpos = *vpos - vbl_end;
 
/* In vblank? */
if (in_vbl)
ret |= DRM_SCANOUTPOS_INVBL;
 
return ret;
}
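/* Usage sketch (illustrative caller, not from this file): to test
* whether CRTC 0 is currently inside vblank:
*
*	int vpos, hpos;
*	int flags = radeon_get_crtc_scanoutpos(dev, 0, &vpos, &hpos);
*	if ((flags & DRM_SCANOUTPOS_VALID) &&
*	    (flags & DRM_SCANOUTPOS_INVBL))
*		handle_vblank();
*
* handle_vblank() is a placeholder; inside that branch vpos is negative
* and counts the scanlines remaining until active scanout begins.
*/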
/drivers/video/drm/radeon/radeon_encoders.c
81,7 → 81,7
}
 
uint32_t
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
struct radeon_device *rdev = dev->dev_private;
uint32_t ret = 0;
97,45 → 97,45
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
else
ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
break;
case 2: /* dac b */
if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
else {
/*if (rdev->family == CHIP_R200)
ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else*/
ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
}
break;
case 3: /* external dac */
if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
}
break;
case ATOM_DEVICE_LCD1_SUPPORT:
if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
else
ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
break;
case ATOM_DEVICE_DFP1_SUPPORT:
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
else
ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
break;
case ATOM_DEVICE_LCD2_SUPPORT:
case ATOM_DEVICE_DFP2_SUPPORT:
142,14 → 142,14
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
case ATOM_DEVICE_DFP3_SUPPORT:
ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
break;
}
 
176,6 → 176,7
return false;
}
}
 
void
radeon_link_encoder_connector(struct drm_device *dev)
{
205,7 → 206,7
if (connector->encoder == encoder) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
DRM_DEBUG("setting active device to %08x from %08x %08x for encoder %d\n",
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
radeon_encoder->active_device, radeon_encoder->devices,
radeon_connector->devices, encoder->encoder_type);
}
212,7 → 213,7
}
}
 
static struct drm_connector *
struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
228,32 → 229,109
return NULL;
}
 
static struct radeon_connector_atom_dig *
radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
static struct drm_connector *
radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
 
if (!rdev->is_atom_bios)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
if (radeon_encoder->devices & radeon_connector->devices)
return connector;
}
return NULL;
}
 
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *other_encoder;
struct radeon_encoder *other_radeon_encoder;
 
if (radeon_encoder->is_ext_encoder)
return NULL;
 
radeon_connector = to_radeon_connector(connector);
 
if (!radeon_connector->con_priv)
list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
if (other_encoder == encoder)
continue;
other_radeon_encoder = to_radeon_encoder(other_encoder);
if (other_radeon_encoder->is_ext_encoder &&
(radeon_encoder->devices & other_radeon_encoder->devices))
return other_encoder;
}
return NULL;
}
 
dig_connector = radeon_connector->con_priv;
bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder)
{
struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder);
 
return dig_connector;
if (other_encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
return true;
default:
return false;
}
}
 
return false;
}
 
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
unsigned hblank = native_mode->htotal - native_mode->hdisplay;
unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
 
adjusted_mode->clock = native_mode->clock;
adjusted_mode->flags = native_mode->flags;
 
if (ASIC_IS_AVIVO(rdev)) {
adjusted_mode->hdisplay = native_mode->hdisplay;
adjusted_mode->vdisplay = native_mode->vdisplay;
}
 
adjusted_mode->htotal = native_mode->hdisplay + hblank;
adjusted_mode->hsync_start = native_mode->hdisplay + hover;
adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
 
adjusted_mode->vtotal = native_mode->vdisplay + vblank;
adjusted_mode->vsync_start = native_mode->vdisplay + vover;
adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
 
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
 
if (ASIC_IS_AVIVO(rdev)) {
adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
}
 
adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
 
adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
 
}
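/* Worked example with an assumed native mode: for a 1366x768 panel with
* htotal = 1500, hsync_start = 1414 and hsync_end = 1446, the derived
* values are hblank = 134, hover = 48 and hsync_width = 32, so the
* adjusted mode always ends up with htotal = 1366 + 134 = 1500 and
* hsync_start = 1366 + 48 = 1414: the panel's own blank and sync
* timings are re-imposed around the native active area regardless of
* the requested mode.
*/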
 
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
262,9 → 340,6
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);
 
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
275,18 → 350,8
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
/* get the native mode for LVDS */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
int mode_id = adjusted_mode->base.id;
*adjusted_mode = *native_mode;
if (!ASIC_IS_AVIVO(rdev)) {
adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay;
adjusted_mode->crtc_hdisplay = mode->hdisplay;
adjusted_mode->crtc_vdisplay = mode->vdisplay;
}
adjusted_mode->base.id = mode_id;
}
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
radeon_panel_mode_fixup(encoder, adjusted_mode);
 
/* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
302,7 → 367,7
}
 
if (ASIC_IS_DCE3(rdev) &&
(radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
(radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
317,13 → 382,9
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
DAC_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0, num = 0;
int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
enum radeon_tv_std tv_std = TV_STD_NTSC;
 
if (dac_info->tv_std)
tv_std = dac_info->tv_std;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
330,12 → 391,10
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
num = 2;
break;
}
 
346,7 → 405,7
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.ucDacStandard = ATOM_DAC1_CV;
else {
switch (tv_std) {
switch (dac_info->tv_std) {
case TV_STD_PAL:
case TV_STD_PAL_M:
case TV_STD_SCART_PAL:
377,11 → 436,7
TV_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
enum radeon_tv_std tv_std = TV_STD_NTSC;
 
if (dac_info->tv_std)
tv_std = dac_info->tv_std;
 
memset(&args, 0, sizeof(args));
 
index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
391,7 → 446,7
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
else {
switch (tv_std) {
switch (dac_info->tv_std) {
case TV_STD_NTSC:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
break;
428,52 → 483,49
 
}
 
union dvo_encoder_control {
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
};
 
void
atombios_external_tmds_setup(struct drm_encoder *encoder, int action)
atombios_dvo_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args;
int index = 0;
union dvo_encoder_control args;
int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
 
memset(&args, 0, sizeof(args));
 
index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
if (ASIC_IS_DCE3(rdev)) {
/* DCE3+ */
args.dvo_v3.ucAction = action;
args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.dvo_v3.ucDVOConfig = 0; /* XXX */
} else if (ASIC_IS_DCE2(rdev)) {
/* DCE2 (pre-DCE3 R6xx, RS600/690/740) */
args.dvo.sDVOEncoder.ucAction = action;
args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
/* DFP1, CRT1, TV1 depending on the type of port */
args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
 
args.sXTmdsEncoder.ucEnable = action;
if (radeon_encoder->pixel_clock > 165000)
args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
} else {
/* R4xx, R5xx */
args.ext_tmds.sXTmdsEncoder.ucEnable = action;
 
if (radeon_encoder->pixel_clock > 165000)
args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL;
args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
 
/*if (pScrn->rgbBits == 8)*/
args.sXTmdsEncoder.ucMisc |= (1 << 1);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
}
 
static void
atombios_ddia_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
DVO_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
 
memset(&args, 0, sizeof(args));
 
index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
 
args.sDVOEncoder.ucAction = action;
args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
if (radeon_encoder->pixel_clock > 165000)
args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
 
union lvds_encoder_control {
488,14 → 540,12
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_connector_atom_dig *dig_connector =
radeon_get_atom_connector_priv_from_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
 
if (!dig || !dig_connector)
if (!dig)
return;
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
519,7 → 569,8
break;
}
 
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
532,17 → 583,17
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1);
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
} else {
if (dig_connector->linkb)
if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
/*if (pScrn->rgbBits == 8) */
args.v1.ucMisc |= (1 << 1);
args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
}
break;
case 2:
561,22 → 612,22
args.v2.ucTemporal = 0;
args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
}
if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
if (dig_connector->linkb)
if (dig->linkb)
args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
593,28 → 644,49
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
r600_hdmi_enable(encoder, hdmi_detected);
}
 
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
 
/* dp bridges are always DP */
if (radeon_encoder_is_dp_bridge(encoder))
return ATOM_ENCODER_MODE_DP;
 
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
return 0;
 
if (!connector) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
return ATOM_ENCODER_MODE_DVI;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
default:
return ATOM_ENCODER_MODE_CRT;
}
}
radeon_connector = to_radeon_connector(connector);
 
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_hdmi_monitor(radeon_connector->edid))
if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
} else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
622,9 → 694,13
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_hdmi_monitor(radeon_connector->edid))
if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_HDMI;
else
} else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
631,16 → 707,21
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid))
else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_HDMI;
else
} else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_eDP:
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT;
671,8 → 752,8
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
* DCE 4.0
* - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B).
* DCE 4.0/5.0
* - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 6 DIG encoder blocks.
* - DIG to PHY mapping is hardcoded
683,6 → 764,12
* DIG5 drives UNIPHY2 link A, A+B
* DIG6 drives UNIPHY2 link B
*
* DCE 4.1
* - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
696,22 → 783,38
DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
};
 
void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_connector_atom_dig *dig_connector =
radeon_get_atom_connector_priv_from_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_encoder_control args;
int index = 0, num = 0;
int index = 0;
uint8_t frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int hpd_id = RADEON_HPD_NONE;
int bpc = 8;
 
if (!dig || !dig_connector)
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
bpc = connector->display_info.bpc;
}
 
/* no dig encoder assigned */
if (dig->dig_encoder == -1)
return;
 
memset(&args, 0, sizeof(args));
724,27 → 827,87
else
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
}
num = dig->dig_encoder + 1;
 
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
args.v1.ucAction = action;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
args.v3.ucPanelMode = panel_mode;
else
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
if (dig_connector->dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
args.v1.ucLaneNum = dig_connector->dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
(args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
args.v1.ucLaneNum = dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
args.v1.ucLaneNum = 4;
 
if (ASIC_IS_DCE4(rdev)) {
if (ASIC_IS_DCE5(rdev)) {
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
(args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
}
args.v4.acConfig.ucDigSel = dig->dig_encoder;
switch (bpc) {
case 0:
args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
break;
case 6:
args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
break;
case 8:
default:
args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case 10:
args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case 12:
args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case 16:
args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
}
if (hpd_id == RADEON_HPD_NONE)
args.v4.ucHPD_ID = 0;
else
args.v4.ucHPD_ID = hpd_id + 1;
} else if (ASIC_IS_DCE4(rdev)) {
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
args.v3.acConfig.ucDigSel = dig->dig_encoder;
switch (bpc) {
case 0:
args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
break;
case 6:
args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
break;
case 8:
default:
args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case 10:
args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case 12:
args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case 16:
args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
}
} else {
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
757,7 → 920,7
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
if (dig_connector->linkb)
if (dig->linkb)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
771,6 → 934,7
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};
 
void
780,45 → 944,68
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_connector_atom_dig *dig_connector =
radeon_get_atom_connector_priv_from_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
union dig_transmitter_control args;
int index = 0, num = 0;
int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
int igp_lane_info = 0;
int dig_encoder = dig->dig_encoder;
 
if (!dig || !dig_connector)
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
connector = radeon_get_connector_for_encoder_init(encoder);
/* just needed to avoid bailing in the encoder check. the encoder
* isn't used for init
*/
dig_encoder = 0;
} else
connector = radeon_get_connector_for_encoder(encoder);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
igp_lane_info = dig_connector->igp_lane_info;
}
 
/* no dig encoder assigned */
if (dig_encoder == -1)
return;
 
connector = radeon_get_connector_for_encoder(encoder);
radeon_connector = to_radeon_connector(connector);
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
 
memset(&args, 0, sizeof(args));
 
if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
break;
}
}
 
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v1.usInitInfo = radeon_connector->connector_object_id;
args.v1.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
825,7 → 1012,7
} else {
if (is_dp)
args.v1.usPixelClock =
cpu_to_le16(dig_connector->dp_clock / 10);
cpu_to_le16(dp_clock / 10);
else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
833,16 → 1020,16
}
if (ASIC_IS_DCE4(rdev)) {
if (is_dp)
args.v3.ucLaneNum = dig_connector->dp_lane_count;
args.v3.ucLaneNum = dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
 
if (dig_connector->linkb) {
if (dig->linkb)
args.v3.acConfig.ucLinkSel = 1;
if (dig_encoder & 1)
args.v3.acConfig.ucEncoderSel = 1;
}
 
/* Select the PLL for the PHY
* DP PHY should be clocked from external src if there is
852,23 → 1039,33
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
pll_id = radeon_crtc->pll_id;
}
 
if (ASIC_IS_DCE5(rdev)) {
/* On DCE5 DCPLL usually generates the DP ref clock */
if (is_dp) {
if (rdev->clock.dp_extclk)
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
else
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
} else
args.v4.acConfig.ucRefClkSource = pll_id;
} else {
/* On DCE4, if there is an external clock, it generates the DP ref clock */
if (is_dp && rdev->clock.dp_extclk)
args.v3.acConfig.ucRefClkSource = 2; /* external src */
else
args.v3.acConfig.ucRefClkSource = pll_id;
}
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v3.acConfig.ucTransmitterSel = 0;
num = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v3.acConfig.ucTransmitterSel = 1;
num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v3.acConfig.ucTransmitterSel = 2;
num = 2;
break;
}
 
877,25 → 1074,23
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v3.acConfig.fCoherentMode = 1;
if (radeon_encoder->pixel_clock > 165000)
args.v3.acConfig.fDualLinkConnector = 1;
}
} else if (ASIC_IS_DCE32(rdev)) {
if (dig->dig_encoder == 1)
args.v2.acConfig.ucEncoderSel = 1;
if (dig_connector->linkb)
args.v2.acConfig.ucEncoderSel = dig_encoder;
if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v2.acConfig.ucTransmitterSel = 0;
num = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v2.acConfig.ucTransmitterSel = 1;
num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v2.acConfig.ucTransmitterSel = 2;
num = 2;
break;
}
 
904,41 → 1099,37
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
if (radeon_encoder->pixel_clock > 165000)
args.v2.acConfig.fDualLinkConnector = 1;
}
} else {
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
 
if (dig->dig_encoder)
if (dig_encoder)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (rdev->flags & RADEON_IS_IGP) {
if (radeon_encoder->pixel_clock > 165000) {
if (dig_connector->igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
else if (dig_connector->igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
} else {
if (dig_connector->igp_lane_info & 0x1)
if ((rdev->flags & RADEON_IS_IGP) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
if (igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
else if (dig_connector->igp_lane_info & 0x2)
else if (igp_lane_info & 0x2)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
else if (dig_connector->igp_lane_info & 0x4)
else if (igp_lane_info & 0x4)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
else if (dig_connector->igp_lane_info & 0x8)
else if (igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
} else {
if (igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
else if (igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
}
break;
}
 
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
 
if (dig_connector->linkb)
if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
948,6 → 1139,8
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
}
}
 
954,7 → 1147,181
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
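/* Illustrative sketch, not part of the driver: the DIG bring-up order used
 * further down in radeon_atom_encoder_mode_set for DCE4 is "transmitter off,
 * encoder setup, transmitter init, transmitter enable". A hypothetical helper
 * capturing that sequence (guarded out, example only):
 */
#if 0
static void example_dce4_dig_enable(struct drm_encoder *encoder)
{
	/* quiesce the link before reprogramming */
	atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
	/* program the DIG encoder for the new mode */
	atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
	/* init and enable the transmitter */
	atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
	atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
}
#endif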
 
bool
atombios_set_edp_panel_power(struct drm_connector *connector, int action)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
union dig_transmitter_control args;
int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
uint8_t frev, crev;
 
if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
goto done;
 
if (!ASIC_IS_DCE4(rdev))
goto done;
 
if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
(action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
goto done;
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
goto done;
 
memset(&args, 0, sizeof(args));
 
args.v1.ucAction = action;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
int i;
 
for (i = 0; i < 300; i++) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
return true;
mdelay(1);
}
return false;
}
done:
return true;
}
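/* Usage sketch (assumption, mirroring the DPMS_ON path below): on eDP the
 * panel must be powered before DP link training, and the function above
 * returns false if HPD never asserts within ~300 ms. Example only:
 */
#if 0
static void example_edp_power_and_train(struct drm_encoder *encoder,
					struct drm_connector *connector)
{
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
	    !atombios_set_edp_panel_power(connector,
					  ATOM_TRANSMITTER_ACTION_POWER_ON))
		DRM_ERROR("eDP panel failed to power up\n");
	radeon_dp_link_train(encoder, connector);
}
#endif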
 
union external_encoder_control {
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
 
static void
atombios_external_encoder_setup(struct drm_encoder *encoder,
struct drm_encoder *ext_encoder,
int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
union external_encoder_control args;
struct drm_connector *connector;
int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
u8 frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
int bpc = 8;
 
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
connector = radeon_get_connector_for_encoder_init(encoder);
else
connector = radeon_get_connector_for_encoder(encoder);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
bpc = connector->display_info.bpc;
}
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
/* no params on frev 1 */
break;
case 2:
switch (crev) {
case 1:
case 2:
args.v1.sDigEncoder.ucAction = action;
args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
if (dp_clock == 270000)
args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v1.sDigEncoder.ucLaneNum = 8;
else
args.v1.sDigEncoder.ucLaneNum = 4;
break;
case 3:
args.v3.sExtEncoder.ucAction = action;
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
else
args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
if (dp_clock == 270000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v3.sExtEncoder.ucLaneNum = 8;
else
args.v3.sExtEncoder.ucLaneNum = 4;
switch (ext_enum) {
case GRAPH_OBJECT_ENUM_ID1:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
break;
case GRAPH_OBJECT_ENUM_ID2:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
break;
case GRAPH_OBJECT_ENUM_ID3:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
break;
}
switch (bpc) {
case 0:
args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
break;
case 6:
args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
break;
case 8:
default:
args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case 10:
args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case 12:
args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case 16:
args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void
atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
997,13 → 1364,16
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
bool is_dce5_dac = false;
bool is_dce5_dvo = false;
 
memset(&args, 0, sizeof(args));
 
DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
switch (radeon_encoder->encoder_id) {
1019,7 → 1389,14
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
if (ASIC_IS_DCE5(rdev))
is_dce5_dvo = true;
else if (ASIC_IS_DCE3(rdev))
is_dig = true;
else
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1033,6 → 1410,9
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (ASIC_IS_DCE5(rdev))
is_dce5_dac = true;
else {
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
1039,6 → 1419,7
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1055,34 → 1436,120
switch (mode) {
case DRM_MODE_DPMS_ON:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
{
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
dp_link_train(encoder, connector);
 
if (connector &&
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector =
radeon_connector->con_priv;
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
radeon_dig_connector->edp_on = true;
}
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
radeon_dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
if (connector &&
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector =
radeon_connector->con_priv;
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
radeon_dig_connector->edp_on = false;
}
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
} else if (is_dce5_dac) {
switch (mode) {
case DRM_MODE_DPMS_ON:
atombios_dac_setup(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dac_setup(encoder, ATOM_DISABLE);
break;
}
} else if (is_dce5_dvo) {
switch (mode) {
case DRM_MODE_DPMS_ON:
atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
}
} else {
switch (mode) {
case DRM_MODE_DPMS_ON:
args.ucAction = ATOM_ENABLE;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLON;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
args.ucAction = ATOM_DISABLE;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLOFF;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
if (ext_encoder) {
int action;
 
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
if (ASIC_IS_DCE41(rdev))
action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
else
action = ATOM_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (ASIC_IS_DCE41(rdev))
action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
else
action = ATOM_DISABLE;
break;
}
atombios_external_encoder_setup(encoder, ext_encoder, action);
}
 
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}
 
union crtc_source_param {
1104,7 → 1571,8
 
memset(&args, 0, sizeof(args));
 
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
1212,10 → 1680,13
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
break;
return;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* update scratch regs with new routing */
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
 
static void
1242,13 → 1713,23
}
 
/* set scaler clears this on some chips */
/* XXX check DCE4 */
if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
if (ASIC_IS_AVIVO(rdev) &&
(!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
if (ASIC_IS_DCE4(rdev)) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
EVERGREEN_INTERLEAVE_EN);
else
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
} else {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
AVIVO_D1MODE_INTERLEAVE_EN);
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
}
}
}
 
static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
{
1260,25 → 1741,27
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
 
/* DCE4/5 */
if (ASIC_IS_DCE4(rdev)) {
struct radeon_connector_atom_dig *dig_connector =
radeon_get_atom_connector_priv_from_encoder(encoder);
 
dig = radeon_encoder->enc_priv;
if (ASIC_IS_DCE41(rdev))
return radeon_crtc->crtc_id;
else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig_connector->linkb)
if (dig->linkb)
return 1;
else
return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig_connector->linkb)
if (dig->linkb)
return 3;
else
return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig_connector->linkb)
if (dig->linkb)
return 5;
else
return 4;
1285,6 → 1768,7
break;
}
}
}
 
/* on DCE32 an encoder can drive any block, so just use the crtc id */
if (ASIC_IS_DCE32(rdev)) {
1318,6 → 1802,34
return 1;
}
 
/* This only needs to be called once at startup */
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_encoder *encoder;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
break;
default:
break;
}
 
if (ext_encoder && ASIC_IS_DCE41(rdev))
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
}
}
 
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
1326,20 → 1838,11
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
 
if (radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (dig)
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
}
radeon_encoder->pixel_clock = adjusted_mode->clock;
 
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
atombios_set_encoder_crtc_source(encoder);
 
if (ASIC_IS_AVIVO(rdev)) {
if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
else
1361,29 → 1864,25
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
/* setup and enable the encoder */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP);
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
 
/* init and enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
/* enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
} else {
/* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
 
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
atombios_ddia_setup(encoder, ATOM_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
atombios_external_tmds_setup(encoder, ATOM_ENABLE);
atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1390,16 → 1889,30
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_ENABLE);
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
atombios_tv_setup(encoder, ATOM_ENABLE);
else
atombios_tv_setup(encoder, ATOM_DISABLE);
}
break;
}
 
if (ext_encoder) {
if (ASIC_IS_DCE41(rdev))
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
}
 
atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
/* XXX */
if (!ASIC_IS_DCE4(rdev))
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
r600_hdmi_setmode(encoder, adjusted_mode);
}
}
 
static bool
atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1418,7 → 1931,8
 
memset(&args, 0, sizeof(args));
 
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return false;
 
args.sDacload.ucMisc = 0;
 
1459,7 → 1973,7
uint32_t bios_0_scratch;
 
if (!atombios_dac_load_detect(encoder, connector)) {
DRM_DEBUG("detect returned false \n");
DRM_DEBUG_KMS("detect returned false \n");
return connector_status_unknown;
}
 
1468,7 → 1982,7
else
bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
 
DRM_DEBUG("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
1492,10 → 2006,37
 
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if ((radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
radeon_encoder_is_dp_bridge(encoder)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (dig)
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
}
 
radeon_atom_output_lock(encoder, true);
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
/* select the clock/data port if it uses a router */
if (radeon_connector->router.cd_valid)
radeon_router_select_cd_port(radeon_connector);
 
/* turn eDP panel on for mode set */
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
}
 
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
}
 
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
1504,11 → 2045,68
 
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
 
/* check for pre-DCE3 cards with shared encoders;
* can't really use the links individually, so don't disable
* the encoder if it's in use by another connector
*/
if (!ASIC_IS_DCE3(rdev)) {
struct drm_encoder *other_encoder;
struct radeon_encoder *other_radeon_encoder;
 
list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
other_radeon_encoder = to_radeon_encoder(other_encoder);
if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
drm_helper_encoder_in_use(other_encoder))
goto disable_done;
}
}
 
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
if (ASIC_IS_DCE4(rdev))
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
else {
/* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_DISABLE);
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
atombios_tv_setup(encoder, ATOM_DISABLE);
break;
}
 
disable_done:
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
1515,6 → 2113,53
radeon_encoder->active_device = 0;
}
 
/* these are handled by the primary encoders */
static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
{
 
}
 
static void radeon_atom_ext_commit(struct drm_encoder *encoder)
{
 
}
 
static void
radeon_atom_ext_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
 
}
 
static void radeon_atom_ext_disable(struct drm_encoder *encoder)
{
 
}
 
static void
radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
{
 
}
 
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
 
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
.dpms = radeon_atom_ext_dpms,
.mode_fixup = radeon_atom_ext_mode_fixup,
.prepare = radeon_atom_ext_prepare,
.mode_set = radeon_atom_ext_mode_set,
.commit = radeon_atom_ext_commit,
.disable = radeon_atom_ext_disable,
/* no detect for TMDS/LVDS yet */
};
 
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
.dpms = radeon_atom_encoder_dpms,
.mode_fixup = radeon_atom_mode_fixup,
1549,12 → 2194,14
struct radeon_encoder_atom_dac *
radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
 
if (!dac)
return NULL;
 
dac->tv_std = TV_STD_NTSC;
dac->tv_std = radeon_atombios_get_tv_info(rdev);
return dac;
}
 
1561,6 → 2208,7
struct radeon_encoder_atom_dig *
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
 
if (!dig)
1570,11 → 2218,19
dig->coherent_mode = true;
dig->dig_encoder = -1;
 
if (encoder_enum == 2)
dig->linkb = true;
else
dig->linkb = false;
 
return dig;
}
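/* Worked example (informational): encoder_enum packs an object id plus an
 * instance number ("enum id"). After the shift above, the second UNIPHY
 * instance has encoder_enum == 2 and therefore drives link B (dig->linkb
 * becomes true), while instance 1 drives link A.
 */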
 
void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
radeon_add_atom_encoder(struct drm_device *dev,
uint32_t encoder_enum,
uint32_t supported_device,
u16 caps)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
1583,7 → 2239,7
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->encoder_id == encoder_id) {
if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
1611,9 → 2267,13
 
radeon_encoder->enc_priv = NULL;
 
radeon_encoder->encoder_id = encoder_id;
radeon_encoder->encoder_enum = encoder_enum;
radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
radeon_encoder->is_ext_encoder = false;
radeon_encoder->caps = caps;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1632,6 → 2292,7
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1652,6 → 2313,9
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
1658,7 → 2322,24
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
case ENCODER_OBJECT_ID_SI170B:
case ENCODER_OBJECT_ID_CH7303:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
case ENCODER_OBJECT_ID_TITFP513:
case ENCODER_OBJECT_ID_VT1623:
case ENCODER_OBJECT_ID_HDMI_SI1930:
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
/* these are handled by the primary encoders */
radeon_encoder->is_ext_encoder = true;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
else
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
break;
}
 
r600_hdmi_init(encoder);
}
/drivers/video/drm/radeon/radeon_family.h
36,7 → 36,7
* Radeon chip families
*/
enum radeon_family {
CHIP_R100,
CHIP_R100 = 0,
CHIP_RV100,
CHIP_RS100,
CHIP_RV200,
80,6 → 80,13
CHIP_JUNIPER,
CHIP_CYPRESS,
CHIP_HEMLOCK,
CHIP_PALM,
CHIP_SUMO,
CHIP_SUMO2,
CHIP_BARTS,
CHIP_TURKS,
CHIP_CAICOS,
CHIP_CAYMAN,
CHIP_LAST,
};
 
99,4 → 106,5
RADEON_IS_PCI = 0x00800000UL,
RADEON_IS_IGPGART = 0x01000000UL,
};
 
#endif
/drivers/video/drm/radeon/radeon_fb.c
23,10 → 23,6
* Authors:
* David Airlie
*/
/*
* Modularization
*/
 
#include <linux/module.h>
#include <linux/fb.h>
 
42,12 → 38,14
#include <drm_mm.h>
#include "radeon_object.h"
 
 
struct fb_info *framebuffer_alloc(size_t size, void *dev);
 
struct radeon_fb_device {
/* object hierarchy -
this contains a helper + a radeon fb;
the helper contains a pointer to the radeon framebuffer baseclass.
*/
struct radeon_fbdev {
struct drm_fb_helper helper;
struct radeon_framebuffer *rfb;
struct radeon_framebuffer rfb;
struct list_head fbdev_list;
struct radeon_device *rdev;
};
 
55,7 → 53,6
// .owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcolreg = drm_fb_helper_setcolreg,
// .fb_fillrect = cfb_fillrect,
// .fb_copyarea = cfb_copyarea,
// .fb_imageblit = cfb_imageblit,
64,46 → 61,7
.fb_setcmap = drm_fb_helper_setcmap,
};
 
/**
* Currently it is assumed that the old framebuffer is reused.
*
* LOCKING
* caller should hold the mode config lock.
*
*/
int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
{
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_display_mode *mode = crtc->desired_mode;
 
fb = crtc->fb;
if (fb == NULL) {
return 1;
}
info = fb->fbdev;
if (info == NULL) {
return 1;
}
if (mode == NULL) {
return 1;
}
info->var.xres = mode->hdisplay;
info->var.right_margin = mode->hsync_start - mode->hdisplay;
info->var.hsync_len = mode->hsync_end - mode->hsync_start;
info->var.left_margin = mode->htotal - mode->hsync_end;
info->var.yres = mode->vdisplay;
info->var.lower_margin = mode->vsync_start - mode->vdisplay;
info->var.vsync_len = mode->vsync_end - mode->vsync_start;
info->var.upper_margin = mode->vtotal - mode->vsync_end;
info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
/* avoid overflow */
info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
 
return 0;
}
EXPORT_SYMBOL(radeonfb_resize);
 
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
int aligned = width;
128,57 → 86,45
return aligned;
}
 
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
};
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
struct radeon_bo *rbo = gobj->driver_private;
int ret;
 
int radeonfb_create(struct drm_device *dev,
uint32_t fb_width, uint32_t fb_height,
uint32_t surface_width, uint32_t surface_height,
uint32_t surface_depth, uint32_t surface_bpp,
struct drm_framebuffer **fb_p)
ret = radeon_bo_reserve(rbo, false);
if (likely(ret == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
// drm_gem_object_unreference_unlocked(gobj);
}
 
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = dev->dev_private;
struct fb_info *info;
struct radeon_fb_device *rfbdev;
struct drm_framebuffer *fb = NULL;
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
u64 fb_gpuaddr;
void *fbptr = NULL;
unsigned long tmp;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
 
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
 
/* avivo can't scanout real 24bpp */
if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
surface_bpp = 32;
 
mode_cmd.bpp = surface_bpp;
/* need to align pitch with crtc limits */
mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
mode_cmd.depth = surface_depth;
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
size = mode_cmd.pitch * mode_cmd.height;
size = mode_cmd->pitch * mode_cmd->height;
aligned_size = ALIGN(size, PAGE_SIZE);
 
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, ttm_bo_type_kernel,
&gobj);
// ret = radeon_gem_object_create(rdev, aligned_size, 0,
// RADEON_GEM_DOMAIN_VRAM,
// false, true,
// &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
surface_width, surface_height);
ret = -ENOMEM;
goto out;
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
return -ENOMEM;
}
rbo = gobj->driver_private;
 
186,7 → 132,7
tiling_flags = RADEON_TILING_MACRO;
 
#ifdef __BIG_ENDIAN
switch (mode_cmd.bpp) {
switch (mode_cmd->bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
200,21 → 146,16
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
tiling_flags | RADEON_TILING_SURFACE,
mode_cmd.pitch);
mode_cmd->pitch);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
mutex_lock(&rdev->ddev->struct_mutex);
fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
if (fb == NULL) {
DRM_ERROR("failed to allocate fb.\n");
ret = -ENOMEM;
goto out_unref;
}
 
 
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
221,61 → 162,98
}
if (fb_tiled)
radeon_bo_check_tiling(rbo, 0, 0);
ret = radeon_bo_kmap(rbo, &fbptr);
ret = radeon_bo_kmap(rbo, NULL);
radeon_bo_unreserve(rbo);
if (ret) {
goto out_unref;
}
 
list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
*gobj_p = gobj;
return 0;
out_unref:
radeonfb_destroy_pinned_object(gobj);
*gobj_p = NULL;
return ret;
}
 
*fb_p = fb;
rfb = to_radeon_framebuffer(fb);
rdev->fbdev_rfb = rfb;
rdev->fbdev_rbo = rbo;
static int radeonfb_create(struct radeon_fbdev *rfbdev,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
int ret;
unsigned long tmp;
 
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
ENTER();
 
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
 
/* avivo can't scanout real 24bpp */
if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
sizes->surface_bpp = 32;
 
mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.depth = sizes->surface_depth;
 
// ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
// rbo = gobj->driver_private;
 
/* okay, we have an object; now allocate the framebuffer */
info = framebuffer_alloc(0, device);
if (info == NULL) {
ret = -ENOMEM;
goto out_unref;
}
 
rdev->fbdev_info = info;
rfbdev = info->par;
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
rfbdev->helper.dev = dev;
ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
RADEONFB_CONN_LIMIT);
if (ret)
goto out_unref;
info->par = rfbdev;
 
#if 0
radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
fb = &rfbdev->rfb.base;
 
/* setup helper */
rfbdev->helper.fb = fb;
rfbdev->helper.fbdev = info;
 
// memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
 
strcpy(info->fix.id, "radeondrmfb");
 
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 
info->flags = FBINFO_DEFAULT;
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
 
tmp = fb_gpuaddr - rdev->mc.vram_start;
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = size;
info->screen_base = fbptr;
info->screen_size = size;
info->fix.smem_len = radeon_bo_size(rbo);
info->screen_base = rbo->kptr;
info->screen_size = radeon_bo_size(rbo);
 
drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
/* setup aperture base/size for vesafb takeover */
info->aperture_base = rdev->ddev->mode_config.fb_base;
info->aperture_size = rdev->mc.real_vram_size;
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
goto out_unref;
}
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
 
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
// info->pixmap.size = 64*1024;
// info->pixmap.buf_align = 8;
// info->pixmap.access_align = 32;
// info->pixmap.flags = FB_PIXMAP_SYSTEM;
// info->pixmap.scan_align = 1;
 
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
282,77 → 260,128
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)size);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitch);
#endif
 
fb->fbdev = info;
rfbdev->rfb = rfb;
rfbdev->rdev = rdev;
LEAVE();
 
mutex_unlock(&rdev->ddev->struct_mutex);
return 0;
 
out_unref:
if (rbo) {
ret = radeon_bo_reserve(rbo, false);
if (likely(ret == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unreserve(rbo);
 
}
}
if (fb && ret) {
list_del(&fb->filp_head);
// drm_gem_object_unreference(gobj);
// drm_framebuffer_cleanup(fb);
kfree(fb);
}
// drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
out:
return ret;
}
 
int radeonfb_probe(struct drm_device *dev)
static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
int new_fb = 0;
int ret;
 
if (!helper->fb) {
ret = radeonfb_create(rfbdev, sizes);
if (ret)
return ret;
new_fb = 1;
}
return new_fb;
}
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
struct fb_info *info;
struct radeon_framebuffer *rfb = &rfbdev->rfb;
 
if (rfbdev->helper.fbdev) {
info = rfbdev->helper.fbdev;
 
// unregister_framebuffer(info);
// framebuffer_release(info);
}
 
if (rfb->obj) {
radeonfb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
}
// drm_fb_helper_fini(&rfbdev->helper);
drm_framebuffer_cleanup(&rfb->base);
 
return 0;
}
 
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
.fb_probe = radeon_fb_find_or_create_single,
};
 
int radeon_fbdev_init(struct radeon_device *rdev)
{
struct radeon_fbdev *rfbdev;
int bpp_sel = 32;
int ret;
 
ENTER();
 
/* select 8 bpp console on RN50 or cards with 32MB or less of vram */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
 
return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
if (!rfbdev)
return -ENOMEM;
 
rfbdev->rdev = rdev;
rdev->mode_info.rfbdev = rfbdev;
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
 
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
rdev->num_crtc,
RADEONFB_CONN_LIMIT);
if (ret) {
kfree(rfbdev);
return ret;
}
 
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
 
LEAVE();
 
return 0;
}
 
void radeon_fbdev_fini(struct radeon_device *rdev)
{
struct fb_info *info;
struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
struct radeon_bo *rbo;
int r;
if (!rdev->mode_info.rfbdev)
return;
 
if (!fb) {
return -EINVAL;
radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
kfree(rdev->mode_info.rfbdev);
rdev->mode_info.rfbdev = NULL;
}
info = fb->fbdev;
if (info) {
struct radeon_fb_device *rfbdev = info->par;
rbo = rfb->obj->driver_private;
// unregister_framebuffer(info);
r = radeon_bo_reserve(rbo, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
drm_fb_helper_free(&rfbdev->helper);
framebuffer_release(info);
}
 
printk(KERN_INFO "unregistered panic notifier\n");
 
return 0;
int radeon_fbdev_total_size(struct radeon_device *rdev)
{
struct radeon_bo *robj;
int size = 0;
 
robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
size += radeon_bo_size(robj);
return size;
}
EXPORT_SYMBOL(radeonfb_remove);
MODULE_LICENSE("GPL");
 
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
return true;
return false;
}
/drivers/video/drm/radeon/radeon_fence.c
57,7 → 57,6
radeon_fence_ring_emit(rdev, fence);
 
fence->emited = true;
fence->timeout = jiffies + ((2000 * HZ) / 1000);
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
70,15 → 69,42
struct list_head *i, *n;
uint32_t seq;
bool wake = false;
unsigned long cjiffies;
 
if (rdev == NULL) {
return true;
if (rdev->wb.enabled) {
u32 scratch_index;
if (rdev->wb.use_event)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
seq = rdev->wb.wb[scratch_index/4];
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = jiffies;
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
} else {
cjiffies = jiffies;
if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
cjiffies -= rdev->fence_drv.last_jiffies;
if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
/* update the timeout */
rdev->fence_drv.last_timeout -= cjiffies;
} else {
/* the 500ms timeout has elapsed, we should test
* for GPU lockup
*/
rdev->fence_drv.last_timeout = 1;
}
if (rdev->shutdown) {
return true;
} else {
/* jiffies wrapped around; update last_jiffies, we will just wait
* a little longer
*/
rdev->fence_drv.last_jiffies = cjiffies;
}
seq = RREG32(rdev->fence_drv.scratch_reg);
rdev->fence_drv.last_seq = seq;
return false;
}
n = NULL;
list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
170,9 → 196,8
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
unsigned long cur_jiffies;
unsigned long timeout;
bool expired = false;
unsigned long irq_flags, timeout;
u32 seq;
int r;
 
if (fence == NULL) {
183,21 → 208,18
if (radeon_fence_signaled(fence)) {
return 0;
}
 
timeout = rdev->fence_drv.last_timeout;
retry:
cur_jiffies = jiffies;
timeout = HZ / 100;
if (time_after(fence->timeout, cur_jiffies)) {
timeout = fence->timeout - cur_jiffies;
}
 
/* save current sequence used to check for GPU lockup */
seq = rdev->fence_drv.last_seq;
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
if (unlikely(r < 0))
if (unlikely(r < 0)) {
return r;
}
} else {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
205,38 → 227,37
radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
if (unlikely(r == 0)) {
expired = true;
/* we were interrupted for some reason and the fence
* isn't signaled yet, resume the wait
*/
if (r) {
timeout = r;
goto retry;
}
if (unlikely(expired)) {
timeout = 1;
if (time_after(cur_jiffies, fence->timeout)) {
timeout = cur_jiffies - fence->timeout;
}
timeout = jiffies_to_msecs(timeout);
if (timeout > 500) {
DRM_ERROR("fence(%p:0x%08X) %lums timeout "
"going to reset GPU\n",
fence, fence->seq, timeout);
radeon_gpu_reset(rdev);
/* don't protect read access to rdev->fence_drv.last_seq
* if we are experiencing a lockup the value doesn't change
*/
if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
/* good news: we believe it's a lockup */
WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
/* FIXME: what should we do? marking everyone
* as signaled for now
*/
rdev->gpu_lockup = true;
r = radeon_gpu_reset(rdev);
if (r)
return r;
WREG32(rdev->fence_drv.scratch_reg, fence->seq);
rdev->gpu_lockup = false;
}
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
rdev->fence_drv.last_jiffies = jiffies;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
}
if (unlikely(expired)) {
rdev->fence_drv.count_timeout++;
cur_jiffies = jiffies;
timeout = 1;
if (time_after(cur_jiffies, fence->timeout)) {
timeout = cur_jiffies - fence->timeout;
}
timeout = jiffies_to_msecs(timeout);
DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
fence, fence->seq, timeout);
DRM_ERROR("last signaled fence(0x%08X)\n",
rdev->fence_drv.last_seq);
}
return 0;
}
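/* Flow summary as a minimal sketch (assumption: simplified, locking and
 * interruptible waits omitted). The wait above retries in slices of
 * last_timeout; if the fence sequence stops advancing for a whole slice and
 * radeon_gpu_is_lockup() agrees, the GPU is reset and the fence is force
 * completed by writing its seq to the scratch register:
 */
#if 0
static int example_fence_wait(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	for (;;) {
		u32 seq = rdev->fence_drv.last_seq;

		wait_event_timeout(rdev->fence_drv.queue,
				   radeon_fence_signaled(fence),
				   rdev->fence_drv.last_timeout);
		if (radeon_fence_signaled(fence))
			return 0;
		if (seq == rdev->fence_drv.last_seq &&
		    radeon_gpu_is_lockup(rdev)) {
			if (radeon_gpu_reset(rdev))
				return -EIO;	/* hypothetical error path */
			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			return 0;
		}
	}
}
#endif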
 
332,7 → 353,6
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
/drivers/video/drm/radeon/radeon_gart.c
90,8 → 90,8
int r;
 
if (rdev->gart.table.vram.robj == NULL) {
r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
true, RADEON_GEM_DOMAIN_VRAM,
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->gart.table.vram.robj);
if (r) {
return r;
190,7 → 190,7
offset, pages, pagelist);
 
if (!rdev->gart.ready) {
DRM_ERROR("trying to bind memory to unitialized GART !\n");
WARN(1, "trying to bind memory to unitialized GART !\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
283,5 → 283,3
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
}
 
 
/drivers/video/drm/radeon/radeon_gem.c
64,7 → 64,7
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
r = radeon_fb_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
if (r) {
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
size, initial_domain, alignment);
153,9 → 153,12
struct drm_radeon_gem_info *args = data;
 
args->vram_size = rdev->mc.real_vram_size;
/* FIXME: report something that makes sense */
args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
args->gart_size = rdev->mc.gtt_size;
args->vram_visible = rdev->mc.real_vram_size;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
RADEON_IB_POOL_SIZE*64*1024;
return 0;
}
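/* Worked arithmetic (informational; assumes RADEON_IB_POOL_SIZE is 16 as in
 * the mainline driver): the reported GART size subtracts the CP ring, one
 * 4096-byte scratch page and the IB pool, 16 * 64KB = 1MB. With a 512MB GTT
 * and a 1MB ring, userspace would see just under 510MB of GART space.
 */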
 
193,15 → 196,11
return r;
}
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
return r;
}
mutex_lock(&dev->struct_mutex);
drm_gem_object_handle_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
args->handle = handle;
return 0;
}
222,15 → 221,13
/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
return -ENOENT;
}
robj = gobj->driver_private;
 
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(gobj);
return r;
}
 
243,13 → 240,11
 
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
return -ENOENT;
}
robj = gobj->driver_private;
args->addr_ptr = radeon_bo_mmap_offset(robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
 
264,7 → 259,7
 
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
return -ENOENT;
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, &cur_placement, true);
280,9 → 275,7
default:
break;
}
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(gobj);
return r;
}
 
296,7 → 289,7
 
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
return -ENOENT;
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, NULL, false);
303,9 → 296,7
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(gobj);
return r;
}
 
320,12 → 311,10
DRM_DEBUG("%d \n", args->handle);
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -EINVAL;
return -ENOENT;
robj = gobj->driver_private;
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(gobj);
return r;
}
 
/drivers/video/drm/radeon/radeon_i2c.c
52,6 → 52,10
}
};
 
/* on hw with routers, select the right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
 
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
if (ret == 2)
return true;
59,6 → 63,7
return false;
}
 
/* bit banging i2c */
 
static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
94,6 → 99,13
}
}
 
/* switch the pads to ddc mode */
if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
temp = RREG32(rec->mask_clk_reg);
temp &= ~(1 << 16);
WREG32(rec->mask_clk_reg, temp);
}
 
/* clear the output pin values */
temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
WREG32(rec->a_clk_reg, temp);
181,13 → 193,30
WREG32(rec->en_data_reg, val);
}
 
static int pre_xfer(struct i2c_adapter *i2c_adap)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
 
radeon_i2c_do_lock(i2c, 1);
 
return 0;
}
 
static void post_xfer(struct i2c_adapter *i2c_adap)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
 
radeon_i2c_do_lock(i2c, 0);
}
 
/* hw i2c */
 
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
{
struct radeon_pll *spll = &rdev->clock.spll;
u32 sclk = radeon_get_engine_clock(rdev);
u32 sclk = rdev->pm.current_sclk;
u32 prescale = 0;
u32 n, m;
u8 loop;
u32 nm;
u8 n, m, loop;
int i2c_clock;
 
switch (rdev->family) {
203,13 → 232,15
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
n = (spll->reference_freq) / (4 * 6);
i2c_clock = 60;
nm = (sclk * 10) / (i2c_clock * 4);
for (loop = 1; loop < 255; loop++) {
if ((loop * (loop - 1)) > n)
if ((nm / loop) < loop)
break;
}
m = loop - 1;
prescale = m | (loop << 8);
n = loop - 1;
m = loop - 2;
prescale = m | (n << 8);
break;
case CHIP_RV380:
case CHIP_RS400:
217,7 → 248,6
case CHIP_R420:
case CHIP_R423:
case CHIP_RV410:
sclk = radeon_get_engine_clock(rdev);
prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
break;
case CHIP_RS600:
232,7 → 262,6
case CHIP_RV570:
case CHIP_R580:
i2c_clock = 50;
sclk = radeon_get_engine_clock(rdev);
if (rdev->family == CHIP_R520)
prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
else
291,6 → 320,7
prescale = radeon_get_i2c_prescale(rdev);
 
reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
RADEON_I2C_DRIVE_EN |
RADEON_I2C_START |
RADEON_I2C_STOP |
RADEON_I2C_GO);
757,27 → 787,16
return ret;
}
 
static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
int ret;
 
radeon_i2c_do_lock(i2c, 1);
ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
radeon_i2c_do_lock(i2c, 0);
 
return ret;
}
 
static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
int ret;
int ret = 0;
 
ENTER();
 
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
797,16 → 816,12
case CHIP_RV410:
case CHIP_RS400:
case CHIP_RS480:
if (rec->hw_capable)
ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
else
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_RS600:
case CHIP_RS690:
case CHIP_RS740:
/* XXX fill in hw i2c implementation */
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_RV515:
case CHIP_R520:
814,13 → 829,10
case CHIP_RV560:
case CHIP_RV570:
case CHIP_R580:
if (rec->hw_capable) {
if (rec->mm_i2c)
ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
else
ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
} else
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_R600:
case CHIP_RV610:
827,7 → 839,6
case CHIP_RV630:
case CHIP_RV670:
/* XXX fill in hw i2c implementation */
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_RV620:
case CHIP_RV635:
838,7 → 849,6
case CHIP_RV710:
case CHIP_RV740:
/* XXX fill in hw i2c implementation */
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
846,7 → 856,6
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
/* XXX fill in hw i2c implementation */
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
default:
DRM_ERROR("i2c: unhandled radeon chip\n");
853,18 → 862,19
ret = -EIO;
break;
}
LEAVE();
 
return ret;
}
 
static u32 radeon_i2c_func(struct i2c_adapter *adap)
static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
 
static const struct i2c_algorithm radeon_i2c_algo = {
.master_xfer = radeon_i2c_xfer,
.functionality = radeon_i2c_func,
.master_xfer = radeon_hw_i2c_xfer,
.functionality = radeon_hw_i2c_func,
};
 
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
871,6 → 881,7
struct radeon_i2c_bus_rec *rec,
const char *name)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_i2c_chan *i2c;
int ret;
 
878,33 → 889,46
if (i2c == NULL)
return NULL;
 
/* set the internal bit adapter */
// i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
// sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
i2c->algo.radeon.bit_data.setsda = set_data;
i2c->algo.radeon.bit_data.setscl = set_clock;
i2c->algo.radeon.bit_data.getsda = get_data;
i2c->algo.radeon.bit_data.getscl = get_clock;
i2c->algo.radeon.bit_data.udelay = 20;
i2c->rec = *rec;
// i2c->adapter.owner = THIS_MODULE;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
if (rec->mm_i2c ||
(rec->hw_capable &&
radeon_hw_i2c &&
((rdev->family <= CHIP_RS480) ||
((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
/* set the radeon hw i2c adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon i2c hw bus %s", name);
i2c->adapter.algo = &radeon_i2c_algo;
// ret = i2c_add_adapter(&i2c->adapter);
// if (ret) {
// DRM_ERROR("Failed to register hw i2c %s\n", name);
// goto out_free;
// }
} else {
/* set the radeon bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon i2c bit bus %s", name);
i2c->adapter.algo_data = &i2c->algo.bit;
i2c->algo.bit.pre_xfer = pre_xfer;
i2c->algo.bit.post_xfer = post_xfer;
i2c->algo.bit.setsda = set_data;
i2c->algo.bit.setscl = set_clock;
i2c->algo.bit.getsda = get_data;
i2c->algo.bit.getscl = get_clock;
i2c->algo.bit.udelay = 20;
/* vesa says 2.2 ms is enough, but 1 jiffy doesn't always seem to
* be enough; 2 jiffies is a lot more reliable */
i2c->algo.radeon.bit_data.timeout = 2;
i2c->algo.radeon.bit_data.data = i2c;
ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
i2c->algo.bit.timeout = 2;
i2c->algo.bit.data = i2c;
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
DRM_ERROR("Failed to register internal bit i2c %s\n", name);
DRM_ERROR("Failed to register bit i2c %s\n", name);
goto out_free;
}
/* set the radeon i2c adapter */
i2c->dev = dev;
i2c->rec = *rec;
// i2c->adapter.owner = THIS_MODULE;
i2c_set_adapdata(&i2c->adapter, i2c);
// sprintf(i2c->adapter.name, "Radeon i2c %s", name);
i2c->adapter.algo_data = &i2c->algo.radeon;
i2c->adapter.algo = &radeon_i2c_algo;
}
 
return i2c;
out_free:
926,7 → 950,10
 
i2c->rec = *rec;
// i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->dev = dev;
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon aux bus %s", name);
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.algo_data = &i2c->algo.dp;
i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
948,16 → 975,62
{
if (!i2c)
return;
// i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
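 
/* Editor's sketch (not part of the driver): the engine selection in
 * radeon_i2c_create() above reduces to the predicate below.  radeon_hw_i2c
 * is the module option that opts in to the hw engine; the family ranges
 * are taken verbatim from the condition above.
 */
static bool radeon_use_hw_i2c(struct radeon_device *rdev,
struct radeon_i2c_bus_rec *rec)
{
if (rec->mm_i2c)
return true; /* the multi-media engine is always hw */
if (!rec->hw_capable || !radeon_hw_i2c)
return false;
/* the hw engine is only wired up for r1xx-rs480 and rv515-r580 */
return (rdev->family <= CHIP_RS480) ||
((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580));
}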
 
void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
/* Add the default buses */
void radeon_i2c_init(struct radeon_device *rdev)
{
if (!i2c)
if (rdev->is_atom_bios)
radeon_atombios_i2c_init(rdev);
else
radeon_combios_i2c_init(rdev);
}
 
/* remove all the buses */
void radeon_i2c_fini(struct radeon_device *rdev)
{
int i;
 
for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
if (rdev->i2c_bus[i]) {
radeon_i2c_destroy(rdev->i2c_bus[i]);
rdev->i2c_bus[i] = NULL;
}
}
}
 
/* Add additional buses */
void radeon_i2c_add(struct radeon_device *rdev,
struct radeon_i2c_bus_rec *rec,
const char *name)
{
struct drm_device *dev = rdev->ddev;
int i;
 
for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
if (!rdev->i2c_bus[i]) {
rdev->i2c_bus[i] = radeon_i2c_create(dev, rec, name);
return;
}
}
}
 
kfree(i2c);
/* looks up bus based on id */
struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
struct radeon_i2c_bus_rec *i2c_bus)
{
int i;
 
for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
if (rdev->i2c_bus[i] &&
(rdev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
return rdev->i2c_bus[i];
}
}
return NULL;
}
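 
/* Editor's note (illustrative only): a typical lookup-or-register flow,
 * assuming a bus record filled in from the bios tables:
 *
 * struct radeon_i2c_chan *bus = radeon_i2c_lookup(rdev, &rec);
 * if (!bus)
 * radeon_i2c_add(rdev, &rec, "DDC");
 *
 * radeon_i2c_add() fills the first free slot of rdev->i2c_bus[], so a
 * second lookup with the same rec.i2c_id then succeeds.
 */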
 
struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
993,7 → 1066,7
*val = in_buf[0];
DRM_DEBUG("val = 0x%02x\n", *val);
} else {
DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
addr, *val);
}
}
1015,7 → 1088,63
out_buf[1] = val;
 
if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
addr, val);
}
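 
/* Editor's sketch: the router helpers below are plain read-modify-write
 * cycles over a mux register, built from radeon_i2c_get_byte() and
 * radeon_i2c_put_byte().  A generic form of the pattern (illustrative
 * helper, not used by the driver):
 */
static void radeon_i2c_rmw_byte(struct radeon_i2c_chan *i2c_bus,
u8 slave_addr, u8 addr,
u8 clear_mask, u8 set_bits)
{
u8 val;
 
radeon_i2c_get_byte(i2c_bus, slave_addr, addr, &val);
val &= ~clear_mask;
val |= set_bits;
radeon_i2c_put_byte(i2c_bus, slave_addr, addr, val);
}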
 
/* ddc router switching */
void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
{
u8 val;
 
if (!radeon_connector->router.ddc_valid)
return;
 
if (!radeon_connector->router_bus)
return;
 
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
val &= ~radeon_connector->router.ddc_mux_control_pin;
radeon_i2c_put_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, val);
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x1, &val);
val &= ~radeon_connector->router.ddc_mux_control_pin;
val |= radeon_connector->router.ddc_mux_state;
radeon_i2c_put_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x1, val);
}
 
/* clock/data router switching */
void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
{
u8 val;
 
if (!radeon_connector->router.cd_valid)
return;
 
if (!radeon_connector->router_bus)
return;
 
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
val &= ~radeon_connector->router.cd_mux_control_pin;
radeon_i2c_put_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, val);
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x1, &val);
val &= ~radeon_connector->router.cd_mux_control_pin;
val |= radeon_connector->router.cd_mux_state;
radeon_i2c_put_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x1, val);
}
 
/drivers/video/drm/radeon/radeon_legacy_crtc.c
26,7 → 26,7
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon_fixed.h"
#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
 
272,7 → 272,7
if (!ref_div)
return 1;
 
vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
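/* Editor's note: worked example, assuming ref_freq = 2700 (27.00 MHz in
 * 10 kHz units) with bios dividers fb_div = 100 and ref_div = 12:
 * vcoFreq = 2700 * 100 / 12 = 22500, i.e. a 225 MHz VCO. */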
 
/*
* This is horribly crude: the VCO frequency range is divided into
314,6 → 314,9
 
switch (mode) {
case DRM_MODE_DPMS_ON:
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else {
335,6 → 338,9
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
}
radeon_crtc->enabled = false;
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
}
}
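 
/* Editor's note: the BEFORE/AFTER comments above encode the intent --
 * presumably pm clocks are recomputed before a crtc is enabled so it never
 * comes up on reduced clocks, and only after a crtc is disabled so an
 * active display is never starved during the transition. */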
342,10 → 348,25
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
 
int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
}
 
int radeon_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *radeon_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
uint64_t base;
356,16 → 377,23
uint32_t gen_cntl_reg, gen_cntl_val;
int r;
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
/* no fb bound */
if (!crtc->fb) {
DRM_DEBUG("No FB bound\n");
if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
 
if (atomic) {
radeon_fb = to_radeon_framebuffer(fb);
target_fb = fb;
} else {
radeon_fb = to_radeon_framebuffer(crtc->fb);
target_fb = crtc->fb;
}
 
switch (crtc->fb->bits_per_pixel) {
switch (target_fb->bits_per_pixel) {
case 8:
format = 2;
break;
387,7 → 415,7
 
/* Pin framebuffer & get tilling informations */
obj = radeon_fb->obj;
rbo = obj->driver_private;
rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
409,13 → 437,13
 
crtc_offset_cntl = 0;
 
pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
crtc_pitch = (((pitch_pixels * crtc->fb->bits_per_pixel) +
((crtc->fb->bits_per_pixel * 8) - 1)) /
(crtc->fb->bits_per_pixel * 8));
pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
((target_fb->bits_per_pixel * 8) - 1)) /
(target_fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
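/* Editor's note: worked example, assuming a 1024-pixel-wide fb at 32 bpp
 * with pitch = 4096 bytes: pitch_pixels = 4096 / 4 = 1024 and
 * crtc_pitch = (1024 * 32 + 255) / 256 = 128, i.e. the pitch rounded up
 * to 8-pixel units, then mirrored into the high half-word. */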
 
 
crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev))
crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
437,7 → 465,7
crtc_tile_x0_y0 = x | (y << 16);
base &= ~0x7ff;
} else {
int byteshift = crtc->fb->bits_per_pixel >> 4;
int byteshift = target_fb->bits_per_pixel >> 4;
int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11;
base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
crtc_offset_cntl |= (y % 16);
444,7 → 472,7
}
} else {
int offset = y * pitch_pixels + x;
switch (crtc->fb->bits_per_pixel) {
switch (target_fb->bits_per_pixel) {
case 8:
offset *= 1;
break;
474,6 → 502,7
gen_cntl_val = RREG32(gen_cntl_reg);
gen_cntl_val &= ~(0xf << 8);
gen_cntl_val |= (format << 8);
gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
WREG32(gen_cntl_reg, gen_cntl_val);
 
crtc_offset = (u32)base;
490,9 → 519,9
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
 
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
rbo = radeon_fb->obj->driver_private;
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
522,7 → 551,7
uint32_t crtc_v_sync_strt_wid;
bool is_tv = false;
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
603,6 → 632,10
? RADEON_CRTC2_INTERLACE_EN
: 0));
 
/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
crtc2_gen_cntl |= RADEON_CRTC2_EN;
 
disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
 
630,6 → 663,10
? RADEON_CRTC_INTERLACE_EN
: 0));
 
/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
crtc_gen_cntl |= RADEON_CRTC_EN;
 
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
RADEON_CRTC_VSYNC_DIS |
703,10 → 740,6
pll = &rdev->clock.p1pll;
 
pll->flags = RADEON_PLL_LEGACY;
if (radeon_new_pll == 1)
pll->algo = PLL_ALGO_NEW;
else
pll->algo = PLL_ALGO_LEGACY;
 
if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
743,10 → 776,10
}
}
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
if (!use_bios_divs) {
radeon_compute_pll(pll, mode->clock,
radeon_compute_pll_legacy(pll, mode->clock,
&freq, &feedback_div, &frac_fb_div,
&reference_div, &post_divider);
 
758,7 → 791,7
if (!post_div->divider)
post_div = &post_divs[0];
 
DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n",
DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n",
(unsigned)freq,
feedback_div,
reference_div,
827,12 → 860,12
| RADEON_P2PLL_SLEEP
| RADEON_P2PLL_ATOMIC_UPDATE_EN));
 
DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
(unsigned)pll_ref_div,
(unsigned)pll_fb_post_div,
(unsigned)htotal_cntl,
RREG32_PLL(RADEON_P2PLL_CNTL));
DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n",
DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n",
(unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
(unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
(unsigned)((pll_fb_post_div &
856,7 → 889,7
}
 
if (rdev->flags & RADEON_IS_MOBILITY) {
/* A temporal workaround for the occational blanking on certain laptop panels.
/* A temporary workaround for the occasional blanking on certain laptop panels.
This appears to be related to the PLL divider registers (failing to lock?).
It occurs even when all dividers are the same with their old settings.
In this case we really don't need to fiddle with PLL registers.
933,12 → 966,12
| RADEON_PPLL_ATOMIC_UPDATE_EN
| RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
 
DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
pll_ref_div,
pll_fb_post_div,
(unsigned)htotal_cntl,
RREG32_PLL(RADEON_PPLL_CNTL));
DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n",
DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n",
pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
(pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
958,6 → 991,12
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);
 
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
1020,6 → 1059,7
.mode_fixup = radeon_crtc_mode_fixup,
.mode_set = radeon_crtc_mode_set,
.mode_set_base = radeon_crtc_set_base,
.mode_set_base_atomic = radeon_crtc_set_base_atomic,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
.load_lut = radeon_crtc_load_lut,
/drivers/video/drm/radeon/radeon_legacy_encoders.c
39,7 → 39,7
radeon_encoder->active_device = 0;
}
 
static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
47,15 → 47,23
uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
int panel_pwr_delay = 2000;
bool is_mac = false;
DRM_DEBUG("\n");
uint8_t backlight_level;
DRM_DEBUG_KMS("\n");
 
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
 
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
panel_pwr_delay = lvds->panel_pwr_delay;
if (lvds->bl_dev)
backlight_level = lvds->backlight_level;
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
panel_pwr_delay = lvds->panel_pwr_delay;
if (lvds->bl_dev)
backlight_level = lvds->backlight_level;
}
}
 
82,11 → 90,13
lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
 
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS |
RADEON_LVDS_BL_MOD_LEVEL_MASK);
lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN |
RADEON_LVDS_DIGON | RADEON_LVDS_BLON |
(backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
if (is_mac)
lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
break;
95,7 → 105,6
case DRM_MODE_DPMS_OFF:
pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
if (is_mac) {
lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
108,6 → 117,7
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
udelay(panel_pwr_delay * 1000);
break;
}
 
116,10 → 126,27
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}
 
static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
DRM_DEBUG("\n");
 
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
lvds->dpms_mode = mode;
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
lvds->dpms_mode = mode;
}
}
 
radeon_legacy_lvds_update(encoder, mode);
}
 
static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
152,7 → 179,7
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
168,7 → 195,7
} else {
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) {
DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
lvds_gen_cntl = lvds->lvds_gen_cntl;
lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
(0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
217,27 → 244,14
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);
 
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
 
/* get the native mode for LVDS */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
int mode_id = adjusted_mode->base.id;
*adjusted_mode = *native_mode;
adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay;
adjusted_mode->crtc_hdisplay = mode->hdisplay;
adjusted_mode->crtc_vdisplay = mode->vdisplay;
adjusted_mode->base.id = mode_id;
}
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
radeon_panel_mode_fixup(encoder, adjusted_mode);
 
return true;
}
251,9 → 265,222
.disable = radeon_legacy_encoder_disable,
};
 
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
#define MAX_RADEON_LEVEL 0xFF
 
struct radeon_backlight_privdata {
struct radeon_encoder *encoder;
uint8_t negative;
};
 
static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
uint8_t level;
 
/* Convert brightness to hardware level */
if (bd->props.brightness < 0)
level = 0;
else if (bd->props.brightness > MAX_RADEON_LEVEL)
level = MAX_RADEON_LEVEL;
else
level = bd->props.brightness;
 
if (pdata->negative)
level = MAX_RADEON_LEVEL - level;
 
return level;
}
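 
/* Editor's note: worked example, assuming bd->props.brightness = 200 on a
 * panel with inverted sense (pdata->negative set): the hardware level
 * written into LVDS_GEN_CNTL is 0xff - 200 = 55. */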
 
static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
struct radeon_encoder *radeon_encoder = pdata->encoder;
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
int dpms_mode = DRM_MODE_DPMS_ON;
 
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
dpms_mode = lvds->dpms_mode;
lvds->backlight_level = radeon_legacy_lvds_level(bd);
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
dpms_mode = lvds->dpms_mode;
lvds->backlight_level = radeon_legacy_lvds_level(bd);
}
}
 
if (bd->props.brightness > 0)
radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
else
radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF);
 
return 0;
}
 
static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
struct radeon_encoder *radeon_encoder = pdata->encoder;
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
uint8_t backlight_level;
 
backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
 
return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level;
}
 
static const struct backlight_ops radeon_backlight_ops = {
.get_brightness = radeon_legacy_backlight_get_brightness,
.update_status = radeon_legacy_backlight_update_status,
};
 
void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct backlight_device *bd;
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
uint8_t backlight_level;
 
if (!radeon_encoder->enc_priv)
return;
 
#ifdef CONFIG_PMAC_BACKLIGHT
if (!pmac_has_backlight_type("ati") &&
!pmac_has_backlight_type("mnca"))
return;
#endif
 
pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
if (!pdata) {
DRM_ERROR("Memory allocation failed\n");
goto error;
}
 
memset(&props, 0, sizeof(props)); /* props is on the stack; clear the fields we don't set */
props.max_brightness = MAX_RADEON_LEVEL;
props.type = BACKLIGHT_RAW;
bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
goto error;
}
 
pdata->encoder = radeon_encoder;
 
backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
 
/* First, try to detect backlight level sense based on the assumption
* that firmware set it up at full brightness
*/
if (backlight_level == 0)
pdata->negative = true;
else if (backlight_level == 0xff)
pdata->negative = false;
else {
/* XXX hack... maybe some day we can figure out in what direction
* backlight should work on a given panel?
*/
pdata->negative = (rdev->family != CHIP_RV200 &&
rdev->family != CHIP_RV250 &&
rdev->family != CHIP_RV280 &&
rdev->family != CHIP_RV350);
 
#ifdef CONFIG_PMAC_BACKLIGHT
pdata->negative = (pdata->negative ||
of_machine_is_compatible("PowerBook4,3") ||
of_machine_is_compatible("PowerBook6,3") ||
of_machine_is_compatible("PowerBook6,5"));
#endif
}
 
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
lvds->bl_dev = bd;
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
lvds->bl_dev = bd;
}
 
bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
 
DRM_INFO("radeon legacy LVDS backlight initialized\n");
 
return;
 
error:
kfree(pdata);
return;
}
 
static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct backlight_device *bd = NULL;
 
if (!radeon_encoder->enc_priv)
return;
 
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
bd = lvds->bl_dev;
lvds->bl_dev = NULL;
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
bd = lvds->bl_dev;
lvds->bl_dev = NULL;
}
 
if (bd) {
struct radeon_backlight_privdata *pdata;
 
pdata = bl_get_data(bd);
backlight_device_unregister(bd);
kfree(pdata);
 
DRM_INFO("radeon legacy LVDS backlight unloaded\n");
}
}
 
#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
 
void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector)
{
}
 
static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder)
{
}
 
#endif
 
 
static void radeon_lvds_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
if (radeon_encoder->enc_priv) {
radeon_legacy_backlight_exit(radeon_encoder);
kfree(radeon_encoder->enc_priv);
}
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
}
 
static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
.destroy = radeon_enc_destroy,
.destroy = radeon_lvds_enc_destroy,
};
 
static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
264,7 → 491,7
uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
switch (mode) {
case DRM_MODE_DPMS_ON:
294,8 → 521,6
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}
 
static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
331,7 → 556,7
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
if (radeon_crtc->crtc_id == 0) {
if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
462,7 → 687,7
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
switch (mode) {
case DRM_MODE_DPMS_ON:
482,8 → 707,6
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}
 
static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
520,7 → 743,7
uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
int i;
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
tmp &= 0xfffff;
628,7 → 851,7
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
switch (mode) {
case DRM_MODE_DPMS_ON:
650,8 → 873,6
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}
 
static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
686,11 → 907,11
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t fp2_gen_cntl;
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
if (rdev->is_atom_bios) {
radeon_encoder->pixel_clock = adjusted_mode->clock;
atombios_external_tmds_setup(encoder, ATOM_ENABLE);
atombios_dvo_setup(encoder, ATOM_ENABLE);
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
} else {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
780,7 → 1001,7
uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
uint32_t tv_master_cntl = 0;
bool is_tv;
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
 
860,8 → 1081,6
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
/* adjust pm to dpms change */
radeon_pm_compute_clocks(rdev);
}
 
static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
900,7 → 1119,7
uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0;
bool is_tv = false;
 
DRM_DEBUG("\n");
DRM_DEBUG_KMS("\n");
 
is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
 
925,17 → 1144,25
RADEON_TV_DAC_BDACPD);
}
 
/* FIXME TV */
if (tv_dac) {
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
RADEON_TV_DAC_NHOLD |
RADEON_TV_DAC_STD_PS2 |
tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
 
if (is_tv) {
if (tv_dac->tv_std == TV_STD_NTSC ||
tv_dac->tv_std == TV_STD_NTSC_J ||
tv_dac->tv_std == TV_STD_PAL_M ||
tv_dac->tv_std == TV_STD_PAL_60)
tv_dac_cntl |= tv_dac->ntsc_tvdac_adj;
else
tv_dac_cntl |= tv_dac->pal_tvdac_adj;
 
if (tv_dac->tv_std == TV_STD_NTSC ||
tv_dac->tv_std == TV_STD_NTSC_J)
tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
else
tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
} else
tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 |
tv_dac->ps2_tvdac_adj);
} else
tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
RADEON_TV_DAC_NHOLD |
RADEON_TV_DAC_STD_PS2);
 
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
}
943,16 → 1170,14
if (ASIC_IS_R300(rdev)) {
gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
}
} else if (rdev->family != CHIP_R200)
disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
else if (rdev->family == CHIP_R200)
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
 
if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev))
if (rdev->family >= CHIP_R200)
disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
else
disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
 
if (rdev->family == CHIP_R200)
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
 
if (is_tv) {
uint32_t dac_cntl;
 
1017,16 → 1242,14
if (ASIC_IS_R300(rdev)) {
WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
}
} else if (rdev->family != CHIP_R200)
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
else if (rdev->family == CHIP_R200)
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
 
if (rdev->family >= CHIP_R200)
WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
else
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
 
if (rdev->family == CHIP_R200)
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
 
if (is_tv)
radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);
 
1093,10 → 1316,10
tmp = RREG32(RADEON_TV_DAC_CNTL);
if ((tmp & RADEON_TV_DAC_GDACDET) != 0) {
found = true;
DRM_DEBUG("S-video TV connection detected\n");
DRM_DEBUG_KMS("S-video TV connection detected\n");
} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
found = true;
DRM_DEBUG("Composite TV connection detected\n");
DRM_DEBUG_KMS("Composite TV connection detected\n");
}
 
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1159,10 → 1382,10
tmp = RREG32(RADEON_TV_DAC_CNTL);
if (tmp & RADEON_TV_DAC_GDACDET) {
found = true;
DRM_DEBUG("S-video TV connection detected\n");
DRM_DEBUG_KMS("S-video TV connection detected\n");
} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
found = true;
DRM_DEBUG("Composite TV connection detected\n");
DRM_DEBUG_KMS("Composite TV connection detected\n");
}
 
WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl);
1183,7 → 1406,18
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
bool color = true;
struct drm_crtc *crtc;
 
/* bail if crtc2 is in use and this encoder isn't the one driving it */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
if ((radeon_crtc->crtc_id == 1) && crtc->enabled) {
if (encoder->crtc != crtc) {
return connector_status_disconnected;
}
}
}
 
if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) {
1352,7 → 1586,7
}
 
void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
1361,7 → 1595,7
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->encoder_id == encoder_id) {
if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
1381,7 → 1615,8
 
radeon_encoder->enc_priv = NULL;
 
radeon_encoder->encoder_id = encoder_id;
radeon_encoder->encoder_enum = encoder_enum;
radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
 
/drivers/video/drm/radeon/radeon_legacy_tv.c
57,6 → 57,10
#define NTSC_TV_PLL_N_14 693
#define NTSC_TV_PLL_P_14 7
 
#define PAL_TV_PLL_M_14 19
#define PAL_TV_PLL_N_14 353
#define PAL_TV_PLL_P_14 5
 
#define VERT_LEAD_IN_LINES 2
#define FRAC_BITS 0xe
#define FRAC_MASK 0x3fff
208,6 → 212,21
8, /* crtcPLL_postDiv */
1022, /* pixToTV */
},
{ /* PAL timing for 14 Mhz ref clk */
800, /* horResolution */
600, /* verResolution */
TV_STD_PAL, /* standard */
1131, /* horTotal */
742, /* verTotal */
813, /* horStart */
840, /* horSyncStart */
633, /* verSyncStart */
708369, /* defRestart */
211, /* crtcPLL_N */
9, /* crtcPLL_M */
8, /* crtcPLL_postDiv */
759, /* pixToTV */
},
};
 
#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
242,7 → 261,7
if (pll->reference_freq == 2700)
const_ptr = &available_tv_modes[1];
else
const_ptr = &available_tv_modes[1]; /* FIX ME */
const_ptr = &available_tv_modes[3];
}
return const_ptr;
}
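 
/* Editor's note: index 3 is the PAL-for-14 MHz entry added to
 * available_tv_modes[] above; the old code fell back to the 27 MHz PAL
 * entry regardless of the reference clock. */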
477,7 → 496,7
 
restart -= v_offset + h_offset;
 
DRM_DEBUG("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
DRM_DEBUG_KMS("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
const_ptr->def_restart, tv_dac->h_pos, tv_dac->v_pos, p1, p2, restart);
 
tv_dac->tv.hrestart = restart % h_total;
486,7 → 505,7
restart /= v_total;
tv_dac->tv.frestart = restart % f_total;
 
DRM_DEBUG("compute_restart: F/H/V=%u,%u,%u\n",
DRM_DEBUG_KMS("compute_restart: F/H/V=%u,%u,%u\n",
(unsigned)tv_dac->tv.frestart,
(unsigned)tv_dac->tv.vrestart,
(unsigned)tv_dac->tv.hrestart);
504,7 → 523,7
tv_dac->tv.timing_cntl = (tv_dac->tv.timing_cntl & ~RADEON_H_INC_MASK) |
((u32)h_inc << RADEON_H_INC_SHIFT);
 
DRM_DEBUG("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
DRM_DEBUG_KMS("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
 
return h_changed;
}
623,8 → 642,8
}
flicker_removal = (tmp + 500) / 1000;
 
if (flicker_removal < 2)
flicker_removal = 2;
if (flicker_removal < 3)
flicker_removal = 3;
for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
if (flicker_removal == SLOPE_limit[i])
break;
685,9 → 704,9
n = PAL_TV_PLL_N_27;
p = PAL_TV_PLL_P_27;
} else {
m = PAL_TV_PLL_M_27;
n = PAL_TV_PLL_N_27;
p = PAL_TV_PLL_P_27;
m = PAL_TV_PLL_M_14;
n = PAL_TV_PLL_N_14;
p = PAL_TV_PLL_P_14;
}
}
 
/drivers/video/drm/radeon/radeon_mode.h
34,11 → 34,12
#include <drm_mode.h>
#include <drm_edid.h>
#include <drm_dp_helper.h>
#include <drm_fixed.h>
#include <drm_crtc_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
#include "radeon_fixed.h"
 
struct radeon_bo;
struct radeon_device;
 
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
65,6 → 66,24
TV_STD_PAL_N,
};
 
enum radeon_underscan_type {
UNDERSCAN_OFF,
UNDERSCAN_ON,
UNDERSCAN_AUTO,
};
 
enum radeon_hpd_id {
RADEON_HPD_1 = 0,
RADEON_HPD_2,
RADEON_HPD_3,
RADEON_HPD_4,
RADEON_HPD_5,
RADEON_HPD_6,
RADEON_HPD_NONE = 0xff,
};
 
#define RADEON_MAX_I2C_BUS 16
 
/* radeon gpio-based i2c
* 1. "mask" reg and bits
* grabs the gpio pins for software use
84,7 → 103,7
/* id used by atom */
uint8_t i2c_id;
/* hpd id used by atom */
uint8_t hpd_id;
enum radeon_hpd_id hpd;
/* can be used with hw i2c engine */
bool hw_capable;
/* uses multi-media i2c engine */
129,13 → 148,9
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define RADEON_PLL_USE_POST_DIV (1 << 12)
#define RADEON_PLL_IS_LCD (1 << 13)
#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
 
/* pll algo */
enum radeon_pll_algo {
PLL_ALGO_LEGACY,
PLL_ALGO_NEW
};
 
struct radeon_pll {
/* reference frequency */
uint32_t reference_freq;
149,6 → 164,8
uint32_t pll_in_max;
uint32_t pll_out_min;
uint32_t pll_out_max;
uint32_t lcd_pll_out_min;
uint32_t lcd_pll_out_max;
uint32_t best_vco;
 
/* divider limits */
166,21 → 183,14
 
/* pll id */
uint32_t id;
/* pll algo */
enum radeon_pll_algo algo;
};
 
struct i2c_algo_radeon_data {
struct i2c_adapter bit_adapter;
struct i2c_algo_bit_data bit_data;
};
 
struct radeon_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
union {
struct i2c_algo_bit_data bit;
struct i2c_algo_dp_aux_data dp;
struct i2c_algo_radeon_data radeon;
} algo;
struct radeon_i2c_bus_rec rec;
};
187,7 → 197,7
 
/* mostly for macs, but really any system without connector tables */
enum radeon_connector_table {
CT_NONE,
CT_NONE = 0,
CT_GENERIC,
CT_IBOOK,
CT_POWERBOOK_EXTERNAL,
197,6 → 207,9
CT_MINI_INTERNAL,
CT_IMAC_G5_ISIGHT,
CT_EMAC,
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
};
 
enum radeon_dvo_chip {
204,6 → 217,8
DVO_SIL1178,
};
 
struct radeon_fbdev;
 
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
214,12 → 229,20
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
struct drm_property *load_detect_property;
/* TV standard load detect */
/* TV standard */
struct drm_property *tv_std_property;
/* legacy TMDS PLL detect */
struct drm_property *tmds_pll_property;
/* underscan */
struct drm_property *underscan_property;
struct drm_property *underscan_hborder_property;
struct drm_property *underscan_vborder_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
 
/* pointer to fbdev info structure */
struct radeon_fbdev *rfbdev;
};
 
#define MAX_H_CODE_TIMING_LEN 32
251,6 → 274,8
uint32_t legacy_display_base_addr;
uint32_t legacy_cursor_offset;
enum radeon_rmx_type rmx_type;
u8 h_border;
u8 v_border;
fixed20_12 vsc;
fixed20_12 hsc;
struct drm_display_mode native_mode;
275,6 → 300,9
uint32_t lvds_gen_cntl;
/* panel mode */
struct drm_display_mode native_mode;
struct backlight_device *bl_dev;
int dpms_mode;
uint8_t backlight_level;
};
 
struct radeon_encoder_tv_dac {
308,23 → 336,29
struct radeon_atom_ss {
uint16_t percentage;
uint8_t type;
uint8_t step;
uint16_t step;
uint8_t delay;
uint8_t range;
uint8_t refdiv;
/* asic_ss */
uint16_t rate;
uint16_t amount;
};
 
struct radeon_encoder_atom_dig {
bool linkb;
/* atom dig */
bool coherent_mode;
int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
/* atom lvds */
uint32_t lvds_misc;
int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
/* atom lvds/edp */
uint32_t lcd_misc;
uint16_t panel_pwr_delay;
enum radeon_pll_algo pll_algo;
struct radeon_atom_ss *ss;
uint32_t lcd_ss_id;
/* panel mode */
struct drm_display_mode native_mode;
struct backlight_device *bl_dev;
int dpms_mode;
uint8_t backlight_level;
};
 
struct radeon_encoder_atom_dac {
333,6 → 367,7
 
struct radeon_encoder {
struct drm_encoder base;
uint32_t encoder_enum;
uint32_t encoder_id;
uint32_t devices;
uint32_t active_device;
339,16 → 374,22
uint32_t flags;
uint32_t pixel_clock;
enum radeon_rmx_type rmx_type;
enum radeon_underscan_type underscan_type;
uint32_t underscan_hborder;
uint32_t underscan_vborder;
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
int hdmi_offset;
int hdmi_config_offset;
int hdmi_audio_workaround;
int hdmi_buffer_status;
bool is_ext_encoder;
u16 caps;
};
 
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
bool linkb;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
u8 dpcd[8];
355,6 → 396,7
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
bool edp_on;
};
 
struct radeon_gpio_rec {
364,16 → 406,6
u32 mask;
};
 
enum radeon_hpd_id {
RADEON_HPD_NONE = 0,
RADEON_HPD_1,
RADEON_HPD_2,
RADEON_HPD_3,
RADEON_HPD_4,
RADEON_HPD_5,
RADEON_HPD_6,
};
 
struct radeon_hpd {
enum radeon_hpd_id hpd;
u8 plugged_state;
380,12 → 412,28
struct radeon_gpio_rec gpio;
};
 
struct radeon_router {
u32 router_id;
struct radeon_i2c_bus_rec i2c_info;
u8 i2c_addr;
/* i2c mux */
bool ddc_valid;
u8 ddc_mux_type;
u8 ddc_mux_control_pin;
u8 ddc_mux_state;
/* clock/data mux */
bool cd_valid;
u8 cd_mux_type;
u8 cd_mux_control_pin;
u8 cd_mux_state;
};
 
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
uint32_t devices;
struct radeon_i2c_chan *ddc_bus;
/* some systems have a an hdmi and vga port with a shared ddc line */
/* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
bool use_digital;
/* we need to mind the EDID between detect
395,6 → 443,8
bool dac_load_detect;
uint16_t connector_object_id;
struct radeon_hpd hpd;
struct radeon_router router;
struct radeon_i2c_chan *router_bus;
};
 
struct radeon_framebuffer {
402,28 → 452,46
struct drm_gem_object *obj;
};
 
 
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
radeon_atombios_get_tv_info(struct radeon_device *rdev);
 
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
 
extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder);
extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
 
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void radeon_dp_set_link_config(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void dp_link_train(struct drm_encoder *encoder,
extern void radeon_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte);
u8 write_byte, u8 *read_byte);
 
extern void radeon_i2c_init(struct radeon_device *rdev);
extern void radeon_i2c_fini(struct radeon_device *rdev);
extern void radeon_combios_i2c_init(struct radeon_device *rdev);
extern void radeon_atombios_i2c_init(struct radeon_device *rdev);
extern void radeon_i2c_add(struct radeon_device *rdev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
struct radeon_i2c_bus_rec *i2c_bus);
extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
431,7 → 499,6
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
440,12 → 507,21
u8 slave_addr,
u8 addr,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
 
extern void radeon_compute_pll(struct radeon_pll *pll,
extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id);
extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock);
 
extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
453,6 → 529,14
uint32_t *ref_div_p,
uint32_t *post_div_p);
 
extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,
u32 *frac_fb_div_p,
u32 *ref_div_p,
u32 *post_div_p);
 
extern void radeon_setup_encoder_clones(struct drm_device *dev);
 
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
460,14 → 544,19
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
 
extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y,
enum mode_set_atomic state);
extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
477,7 → 566,13
 
extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
 
extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y,
enum mode_set_atomic state);
extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic);
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
486,9 → 581,12
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
 
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
 
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
534,12 → 632,11
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
void radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj);
 
int radeonfb_probe(struct drm_device *dev);
 
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
556,10 → 653,11
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
extern int radeon_static_clocks_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
 
/* legacy tv */
575,4 → 673,17
void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
/* fbdev layer */
int radeon_fbdev_init(struct radeon_device *rdev);
void radeon_fbdev_fini(struct radeon_device *rdev);
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
int radeon_fbdev_total_size(struct radeon_device *rdev);
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
 
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
 
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
 
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
#endif
/drivers/video/drm/radeon/radeon_object.c
30,6 → 30,7
* Dave Airlie
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
88,15 → 89,11
}
 
 
int radeon_object_create(struct radeon_device *rdev,
struct drm_gem_object *gobj,
unsigned long size,
bool kernel,
uint32_t domain,
bool interruptible,
struct radeon_object **robj_ptr)
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
struct radeon_object *robj;
struct radeon_bo *bo;
enum ttm_bo_type type;
uint32_t flags;
int r;
106,18 → 103,17
} else {
type = ttm_bo_type_device;
}
*robj_ptr = NULL;
robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
if (robj == NULL) {
*bo_ptr = NULL;
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL) {
return -ENOMEM;
}
robj->rdev = rdev;
// robj->gobj = gobj;
INIT_LIST_HEAD(&robj->list);
bo->rdev = rdev;
INIT_LIST_HEAD(&bo->list);
 
flags = radeon_object_flags_from_domain(domain);
 
robj->flags = flags;
bo->flags = flags;
 
if( flags & TTM_PL_FLAG_VRAM)
{
144,13 → 140,13
return r;
}
 
robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
bo->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
 
if (unlikely(robj->mm_node == NULL)) {
if (unlikely(bo->mm_node == NULL)) {
goto retry_pre_get;
}
 
robj->vm_addr = ((uint32_t)robj->mm_node->start);
bo->vm_addr = ((uint32_t)bo->mm_node->start);
 
// dbgprintf("alloc vram: base %x size %x\n",
// robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
188,7 → 184,7
goto retry_pre_get1;
}
 
robj->vm_addr = ((uint32_t)robj->mm_node->start) ;
bo->vm_addr = ((uint32_t)bo->mm_node->start) ;
 
// dbgprintf("alloc gtt: base %x size %x\n",
// robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
/drivers/video/drm/radeon/radeon_object.h
75,7 → 75,7
* Returns current GPU offset of the object.
*
* Note: object should either be pinned or reserved when calling this
* function, it might be usefull to add check for this for debugging.
* function, it might be useful to add check for this for debugging.
*/
static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
{
112,23 → 112,20
int r;
 
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
if (unlikely(r != 0))
return r;
}
spin_lock(&bo->tbo.lock);
// spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
if (bo->tbo.sync_obj)
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
spin_unlock(&bo->tbo.lock);
// spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
}
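 
/* Editor's note (illustrative only): typical use of the wait helper above
 * (radeon_bo_wait() upstream) -- block on the bo's pending GPU work and
 * learn which memory pool it resides in:
 *
 * u32 mem_type;
 * int r = radeon_bo_wait(bo, &mem_type, false);
 * if (!r && mem_type == TTM_PL_VRAM)
 * ...; // idle and resident in VRAM
 */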
 
extern int radeon_bo_create(struct radeon_device *rdev,
struct drm_gem_object *gobj, unsigned long size,
unsigned long size, int byte_align,
bool kernel, u32 domain,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
142,10 → 139,7
extern void radeon_bo_fini(struct radeon_device *rdev);
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
extern int radeon_bo_list_reserve(struct list_head *head);
extern void radeon_bo_list_unreserve(struct list_head *head);
extern int radeon_bo_list_validate(struct list_head *head);
extern void radeon_bo_list_fence(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
156,6 → 150,6
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif
/drivers/video/drm/radeon/radeon_object_kos.c
101,8 → 101,9
bo->reserved.counter = 1;
}
 
int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
unsigned long size, bool kernel, u32 domain,
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
enum ttm_bo_type type;
143,7 → 144,6
return -ENOMEM;
 
bo->rdev = rdev;
bo->gobj = gobj;
bo->surface_reg = -1;
bo->tbo.num_pages = num_pages;
bo->domain = domain;
359,7 → 359,7
return -ENOMEM;
 
bo->rdev = rdev;
bo->gobj = gobj;
// bo->gobj = gobj;
bo->surface_reg = -1;
bo->tbo.num_pages = num_pages;
bo->domain = domain;
371,7 → 371,6
 
vm_node = kzalloc(sizeof(*vm_node),0);
 
vm_node->free = 0;
vm_node->size = 0xC00000 >> 12;
vm_node->start = 0;
vm_node->mm = NULL;
/drivers/video/drm/radeon/radeon_pm.c
24,23 → 24,14
#include "radeon.h"
#include "avivod.h"
 
#define DRM_DEBUG_DRIVER(fmt, args...)
 
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200
 
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
static void radeon_pm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
 
static const char *pm_state_names[4] = {
"PM_STATE_DISABLED",
"PM_STATE_MINIMUM",
"PM_STATE_PAUSED",
"PM_STATE_ACTIVE"
};
 
static const char *pm_state_types[5] = {
static const char *radeon_pm_state_type_name[5] = {
"Default",
"Powersave",
"Battery",
48,238 → 39,552
"Performance",
};
 
static void radeon_print_power_mode_info(struct radeon_device *rdev)
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
 
static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
 
#define ACPI_AC_CLASS "ac_adapter"
 
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
unsigned long val,
void *data)
{
int i, j;
bool is_default;
struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
 
DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
for (i = 0; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
is_default = true;
if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
if (power_supply_is_system_supplied() > 0)
DRM_DEBUG_DRIVER("pm: AC\n");
else
is_default = false;
DRM_INFO("State %d %s %s\n", i,
pm_state_types[rdev->pm.power_state[i].type],
is_default ? "(default)" : "");
if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
if (rdev->flags & RADEON_IS_IGP)
DRM_INFO("\t\t%d engine: %d\n",
j,
rdev->pm.power_state[i].clock_info[j].sclk * 10);
else
DRM_INFO("\t\t%d engine/memory: %d/%d\n",
j,
rdev->pm.power_state[i].clock_info[j].sclk * 10,
rdev->pm.power_state[i].clock_info[j].mclk * 10);
DRM_DEBUG_DRIVER("pm: DC\n");
 
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
mutex_unlock(&rdev->pm.mutex);
}
}
}
 
static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
enum radeon_pm_state_type type)
return NOTIFY_OK;
}
#endif
 
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
int i, j;
enum radeon_pm_state_type wanted_types[2];
int wanted_count;
 
switch (type) {
case POWER_STATE_TYPE_DEFAULT:
default:
return rdev->pm.default_power_state;
case POWER_STATE_TYPE_POWERSAVE:
if (rdev->flags & RADEON_IS_MOBILITY) {
wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
wanted_types[1] = POWER_STATE_TYPE_BATTERY;
wanted_count = 2;
} else {
wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
wanted_count = 1;
}
switch (rdev->pm.profile) {
case PM_PROFILE_DEFAULT:
rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
break;
case POWER_STATE_TYPE_BATTERY:
if (rdev->flags & RADEON_IS_MOBILITY) {
wanted_types[0] = POWER_STATE_TYPE_BATTERY;
wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
wanted_count = 2;
case PM_PROFILE_AUTO:
if (power_supply_is_system_supplied() > 0) {
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
} else {
wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
wanted_count = 1;
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
}
break;
case POWER_STATE_TYPE_BALANCED:
case POWER_STATE_TYPE_PERFORMANCE:
wanted_types[0] = type;
wanted_count = 1;
case PM_PROFILE_LOW:
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
break;
case PM_PROFILE_MID:
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
break;
case PM_PROFILE_HIGH:
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
break;
}
 
for (i = 0; i < wanted_count; i++) {
for (j = 0; j < rdev->pm.num_power_states; j++) {
if (rdev->pm.power_state[j].type == wanted_types[i])
return &rdev->pm.power_state[j];
if (rdev->pm.active_crtc_count == 0) {
rdev->pm.requested_power_state_index =
rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
rdev->pm.requested_clock_mode_index =
rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
} else {
rdev->pm.requested_power_state_index =
rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
rdev->pm.requested_clock_mode_index =
rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
}
}
 
return rdev->pm.default_power_state;
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
struct radeon_bo *bo, *n;
 
if (list_empty(&rdev->gem.objects))
return;
 
}
 
static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
							     struct radeon_power_state *power_state,
							     enum radeon_pm_clock_mode_type type)
{
	switch (type) {
	case POWER_MODE_TYPE_DEFAULT:
	default:
		return power_state->default_clock_mode;
	case POWER_MODE_TYPE_LOW:
		return &power_state->clock_info[0];
	case POWER_MODE_TYPE_MID:
		if (power_state->num_clock_modes > 2)
			return &power_state->clock_info[1];
		else
			return &power_state->clock_info[0];
		break;
	case POWER_MODE_TYPE_HIGH:
		return &power_state->clock_info[power_state->num_clock_modes - 1];
	}
}

static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;
 
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
 
if (radeon_gui_idle(rdev)) {
sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk;
if (sclk > rdev->pm.default_sclk)
sclk = rdev->pm.default_sclk;
 
mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk;
if (mclk > rdev->pm.default_mclk)
mclk = rdev->pm.default_mclk;
 
/* upvolt before raising clocks, downvolt after lowering clocks */
if (sclk < rdev->pm.current_sclk)
misc_after = true;
 
// radeon_sync_with_vblank(rdev);
 
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (!radeon_pm_in_vbl(rdev))
return;
}
 
radeon_pm_prepare(rdev);
 
if (!misc_after)
/* voltage, pcie lanes, etc.*/
radeon_pm_misc(rdev);
 
/* set engine clock */
if (sclk != rdev->pm.current_sclk) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_engine_clock(rdev, sclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_sclk = sclk;
DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
}
 
/* set memory clock */
if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_mclk = mclk;
DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
}
 
if (misc_after)
/* voltage, pcie lanes, etc.*/
radeon_pm_misc(rdev);
 
radeon_pm_finish(rdev);
 
rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
} else
DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
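/* A minimal sketch (not driver code; the helper name is hypothetical) of the
 * ordering rule that misc_after implements above: raise voltage before
 * raising clocks, and lower it only after lowering clocks, so the chip never
 * runs a clock its current voltage cannot support. */
static void reclock_with_voltage_rule(unsigned int cur_sclk, unsigned int new_sclk,
				      void (*set_voltage)(void),
				      void (*set_clock)(unsigned int))
{
	bool volt_after = (new_sclk < cur_sclk);	/* downclocking */

	if (!volt_after)
		set_voltage();		/* upvolt first, then speed up */
	set_clock(new_sclk);
	if (volt_after)
		set_voltage();		/* slow down first, then downvolt */
}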
 
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
int i;
 
/* no need to take locks, etc. if nothing's going to change */
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
 
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
mutex_lock(&rdev->cp.mutex);
 
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
if (rdev->irq.installed) {
/* wait for GPU idle */
rdev->pm.gui_idle = false;
rdev->irq.gui_idle = true;
}
} else {
if (rdev->cp.ready) {
// struct radeon_fence *fence;
// radeon_ring_alloc(rdev, 64);
// radeon_fence_create(rdev, &fence);
// radeon_fence_emit(rdev, fence);
// radeon_ring_commit(rdev);
// radeon_fence_wait(fence, false);
// radeon_fence_unref(&fence);
}
}
radeon_unmap_vram_bos(rdev);
 
if (rdev->irq.installed) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->pm.active_crtcs & (1 << i)) {
rdev->pm.req_vblank |= (1 << i);
// drm_vblank_get(rdev->ddev, i);
}
}
}
 
radeon_set_power_state(rdev);
 
if (rdev->irq.installed) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->pm.req_vblank & (1 << i)) {
rdev->pm.req_vblank &= ~(1 << i);
// drm_vblank_put(rdev->ddev, i);
}
}
}
 
/* update display watermarks based on new power state */
radeon_update_bandwidth_info(rdev);
if (rdev->pm.active_crtc_count)
radeon_bandwidth_update(rdev);
 
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 
mutex_unlock(&rdev->cp.mutex);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
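/* Sketch of the bitmask bookkeeping used above (hypothetical helpers, not in
 * the driver): bit i of active_crtcs marks CRTC i as enabled, and req_vblank
 * mirrors which CRTCs a vblank reference is currently held on, so every
 * "for each active display" loop reduces to shift-and-test. */
static inline void crtc_mask_set(unsigned int *mask, int crtc)
{
	*mask |= (1u << crtc);
}

static inline void crtc_mask_clear(unsigned int *mask, int crtc)
{
	*mask &= ~(1u << crtc);
}

static inline int crtc_mask_test(unsigned int mask, int crtc)
{
	return (mask >> crtc) & 1u;
}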
 
static void radeon_pm_print_states(struct radeon_device *rdev)
{
int i, j;
struct radeon_power_state *power_state;
struct radeon_pm_clock_info *clock_info;
 
DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
for (i = 0; i < rdev->pm.num_power_states; i++) {
power_state = &rdev->pm.power_state[i];
DRM_DEBUG_DRIVER("State %d: %s\n", i,
radeon_pm_state_type_name[power_state->type]);
if (i == rdev->pm.default_power_state_index)
DRM_DEBUG_DRIVER("\tDefault");
if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
DRM_DEBUG_DRIVER("\tSingle display only\n");
DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
for (j = 0; j < power_state->num_clock_modes; j++) {
clock_info = &(power_state->clock_info[j]);
if (rdev->flags & RADEON_IS_IGP)
DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
j,
clock_info->sclk * 10,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
			else
DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
j,
clock_info->sclk * 10,
clock_info->mclk * 10,
clock_info->voltage.voltage,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
}
}
}
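/* Unit note, as a tiny sketch (hypothetical helper): the power tables store
 * clocks in 10 kHz units, which is why the prints above multiply sclk/mclk
 * by 10 to display kHz; a stored value of 40000 is 400 MHz. */
static inline unsigned int radeon_clk_10khz_to_khz(unsigned int clk)
{
	return clk * 10;
}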
 
static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr,
char *buf)
{
 
return snprintf(buf, PAGE_SIZE, "%s\n", "default");
}
 
static void radeon_get_power_state(struct radeon_device *rdev,
				   enum radeon_pm_action action)
{
	switch (action) {
	case PM_ACTION_MINIMUM:
		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
		rdev->pm.requested_clock_mode =
			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
		break;
	case PM_ACTION_DOWNCLOCK:
		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
		rdev->pm.requested_clock_mode =
			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
		break;
	case PM_ACTION_UPCLOCK:
		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
		rdev->pm.requested_clock_mode =
			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
		break;
	case PM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined action\n");
		return;
	}
	DRM_INFO("Requested: e: %d m: %d p: %d\n",
		 rdev->pm.requested_clock_mode->sclk,
		 rdev->pm.requested_clock_mode->mclk,
		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
}

static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.profile = PM_PROFILE_DEFAULT;

	radeon_pm_update_profile(rdev);
	radeon_pm_set_clocks(rdev);
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
 
static ssize_t radeon_get_pm_method(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int pm = rdev->pm.pm_method;
 
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}
 
static ssize_t radeon_set_pm_method(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
 
 
if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
mutex_lock(&rdev->pm.mutex);
rdev->pm.pm_method = PM_METHOD_DYNPM;
rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
mutex_unlock(&rdev->pm.mutex);
} else if (strncmp("profile", buf, strlen("profile")) == 0) {
mutex_lock(&rdev->pm.mutex);
/* disable dynpm */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.pm_method = PM_METHOD_PROFILE;
mutex_unlock(&rdev->pm.mutex);
// cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
} else {
DRM_ERROR("invalid power method!\n");
goto fail;
}
radeon_pm_compute_clocks(rdev);
fail:
return count;
}
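/* Sketch of the store-method parsing above (hypothetical helper, not part of
 * the driver): sysfs writes may carry a trailing newline, so the string is
 * matched by prefix with strncmp and anything else is rejected. */
static int parse_pm_method(const char *buf)
{
	if (strncmp("dynpm", buf, strlen("dynpm")) == 0)
		return PM_METHOD_DYNPM;
	if (strncmp("profile", buf, strlen("profile")) == 0)
		return PM_METHOD_PROFILE;
	return -EINVAL;		/* mirrors the error path above */
}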
 
static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	u32 temp;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
		temp = rv6xx_get_temp(rdev);
		break;
	case THERMAL_TYPE_RV770:
		temp = rv770_get_temp(rdev);
		break;
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
		temp = evergreen_get_temp(rdev);
		break;
	default:
		temp = 0;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
 
static void radeon_set_power_state(struct radeon_device *rdev)
{
	/* if *_clock_mode are the same, *_power_state are as well */
	if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
		return;

	DRM_INFO("Setting: e: %d m: %d p: %d\n",
		 rdev->pm.requested_clock_mode->sclk,
		 rdev->pm.requested_clock_mode->mclk,
		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
	/* set pcie lanes */
	/* set voltage */
	/* set engine clock */
	radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
	/* set memory clock */

	rdev->pm.current_power_state = rdev->pm.requested_power_state;
	rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
}

static ssize_t radeon_hwmon_show_name(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "radeon\n");
}

static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	rdev->pm.int_hwmon_dev = NULL;

	return err;
}
 
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
}
 
void radeon_pm_suspend(struct radeon_device *rdev)
{
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
}
mutex_unlock(&rdev->pm.mutex);
 
// cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}
 
void radeon_pm_resume(struct radeon_device *rdev)
{
/* asic init will reset the default power state */
mutex_lock(&rdev->pm.mutex);
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
// schedule_delayed_work(&rdev->pm.dynpm_idle_work,
// msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
}
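/* The dynpm_state field used above behaves as a small state machine:
 * DISABLED until the dynpm method is selected, PAUSED while more than one
 * display is lit, ACTIVE while reclocking is allowed, MINIMUM with no
 * displays, and SUSPENDED parked across suspend/resume, as the two
 * functions above show. */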
 
int radeon_pm_init(struct radeon_device *rdev)
{
rdev->pm.state = PM_STATE_DISABLED;
rdev->pm.planned_action = PM_ACTION_NONE;
rdev->pm.downclocked = false;
int ret;
 
/* default to profile method */
rdev->pm.pm_method = PM_METHOD_PROFILE;
rdev->pm.profile = PM_PROFILE_DEFAULT;
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
rdev->pm.default_sclk = rdev->clock.default_sclk;
rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
 
if (rdev->bios) {
if (rdev->is_atom_bios)
radeon_atombios_get_power_modes(rdev);
else
radeon_combios_get_power_modes(rdev);
radeon_print_power_mode_info(rdev);
radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
}
 
	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	if (rdev->pm.num_power_states > 1) {
		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}

	if (radeon_dynpm != -1 && radeon_dynpm) {
		rdev->pm.state = PM_STATE_PAUSED;
		DRM_INFO("radeon: dynamic power management enabled\n");
	}

	DRM_INFO("radeon: power management initialized\n");

//	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

	return 0;
}

void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

//		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
	}

	radeon_hwmon_fini(rdev);
}
 
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_connector *connector;
	struct radeon_crtc *radeon_crtc;
	int count = 0;

	if (rdev->pm.state == PM_STATE_DISABLED)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	list_for_each_entry(connector,
		&ddev->mode_config.connector_list, head) {
		if (connector->encoder &&
		    connector->dpms != DRM_MODE_DPMS_OFF) {
			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			++count;
		}
	}

	if (count > 1) {
		if (rdev->pm.state == PM_STATE_ACTIVE) {
			rdev->pm.state = PM_STATE_PAUSED;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			if (rdev->pm.downclocked)
				radeon_pm_set_clocks(rdev);

			DRM_DEBUG("radeon: dynamic power management deactivated\n");
		}
	} else if (count == 1) {
		/* TODO: Increase clocks if needed for current mode */

		if (rdev->pm.state == PM_STATE_MINIMUM) {
			rdev->pm.state = PM_STATE_ACTIVE;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks(rdev);
		}
		else if (rdev->pm.state == PM_STATE_PAUSED) {
			rdev->pm.state = PM_STATE_ACTIVE;
			DRM_DEBUG("radeon: dynamic power management activated\n");
		}
	}
	else { /* count == 0 */
		if (rdev->pm.state != PM_STATE_MINIMUM) {
			rdev->pm.state = PM_STATE_MINIMUM;
			rdev->pm.planned_action = PM_ACTION_MINIMUM;
			radeon_pm_set_clocks(rdev);
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}

void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
		&ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
//					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
//							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
//					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}
 
286,139 → 591,38
mutex_unlock(&rdev->pm.mutex);
}
 
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc1 = 0, stat_crtc2 = 0;
	bool in_vbl = true;

	if (ASIC_IS_AVIVO(rdev)) {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			stat_crtc1 = RREG32(D1CRTC_STATUS);
			if (!(stat_crtc1 & 1))
				in_vbl = false;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			stat_crtc2 = RREG32(D2CRTC_STATUS);
			if (!(stat_crtc2 & 1))
				in_vbl = false;
		}
	}
	if (in_vbl == false)
		DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
			 stat_crtc2, finish ? "exit" : "entry");

	return in_vbl;
}

static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
				in_vbl = false;
		}
	}

	return in_vbl;
}
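/* Note the polarity above: a CRTC only forces in_vbl to false when its
 * scanout position reads back VALID and reports "not in vblank"; if the
 * position cannot be read at all, the code errs toward treating the CRTC as
 * in vblank rather than blocking the reclock indefinitely. */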
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
{
/*radeon_fence_wait_last(rdev);*/
switch (rdev->pm.planned_action) {
case PM_ACTION_UPCLOCK:
rdev->pm.downclocked = false;
break;
case PM_ACTION_DOWNCLOCK:
rdev->pm.downclocked = true;
break;
case PM_ACTION_MINIMUM:
break;
case PM_ACTION_NONE:
DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
break;
}
 
/* check if we are in vblank */
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_power_state(rdev);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.planned_action = PM_ACTION_NONE;
}
 
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
				 finish ? "exit" : "entry");
	return in_vbl;
}

static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	radeon_get_power_state(rdev, rdev->pm.planned_action);
	mutex_lock(&rdev->cp.mutex);

	if (rdev->pm.active_crtcs & (1 << 0)) {
		rdev->pm.req_vblank |= (1 << 0);
//		drm_vblank_get(rdev->ddev, 0);
	}
if (rdev->pm.active_crtcs & (1 << 1)) {
rdev->pm.req_vblank |= (1 << 1);
// drm_vblank_get(rdev->ddev, 1);
}
if (rdev->pm.active_crtcs)
// wait_event_interruptible_timeout(
// rdev->irq.vblank_queue, 0,
// msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
if (rdev->pm.req_vblank & (1 << 0)) {
rdev->pm.req_vblank &= ~(1 << 0);
// drm_vblank_put(rdev->ddev, 0);
}
if (rdev->pm.req_vblank & (1 << 1)) {
rdev->pm.req_vblank &= ~(1 << 1);
// drm_vblank_put(rdev->ddev, 1);
}
 
radeon_pm_set_clocks_locked(rdev);
mutex_unlock(&rdev->cp.mutex);
}
 
#if 0
static void radeon_pm_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev;
rdev = container_of(work, struct radeon_device,
pm.idle_work.work);
 
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.state == PM_STATE_ACTIVE) {
unsigned long irq_flags;
int not_processed = 0;
 
read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (!list_empty(&rdev->fence_drv.emited)) {
struct list_head *ptr;
list_for_each(ptr, &rdev->fence_drv.emited) {
				/* count up to 3, that's enough info */
if (++not_processed >= 3)
break;
}
}
read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 
if (not_processed >= 3) { /* should upclock */
if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
rdev->pm.planned_action = PM_ACTION_NONE;
} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
rdev->pm.downclocked) {
rdev->pm.planned_action =
PM_ACTION_UPCLOCK;
rdev->pm.action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
} else if (not_processed == 0) { /* should downclock */
if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
rdev->pm.planned_action = PM_ACTION_NONE;
} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
!rdev->pm.downclocked) {
rdev->pm.planned_action =
PM_ACTION_DOWNCLOCK;
rdev->pm.action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
}
 
if (rdev->pm.planned_action != PM_ACTION_NONE &&
jiffies > rdev->pm.action_timeout) {
radeon_pm_set_clocks(rdev);
}
}
mutex_unlock(&rdev->pm.mutex);
 
queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
#endif
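/* A condensed sketch (hypothetical helper) of the heuristic in the disabled
 * idle handler above: three or more unfinished fences count as "busy", zero
 * as "idle", and a planned reclock only fires after RADEON_RECLOCK_DELAY_MS
 * of sustained load or idleness, which gives hysteresis and avoids clock
 * flapping. */
enum plan { PLAN_NONE, PLAN_UP, PLAN_DOWN };

static enum plan update_plan(enum plan cur, int pending, bool downclocked)
{
	if (pending >= 3)	/* busy: cancel a pending downclock, or plan an upclock */
		return (cur == PLAN_DOWN) ? PLAN_NONE :
		       (cur == PLAN_NONE && downclocked) ? PLAN_UP : cur;
	if (pending == 0)	/* idle: cancel a pending upclock, or plan a downclock */
		return (cur == PLAN_UP) ? PLAN_NONE :
		       (cur == PLAN_NONE && !downclocked) ? PLAN_DOWN : cur;
	return cur;
}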
 
/*
* Debugfs info
*/
430,12 → 634,13
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
 
seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
if (rdev->pm.current_vddc)
seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
if (rdev->asic->get_pcie_lanes)
seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
 
/drivers/video/drm/radeon/radeon_reg.h
55,6 → 55,7
#include "r500_reg.h"
#include "r600_reg.h"
#include "evergreen_reg.h"
#include "ni_reg.h"
 
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
320,7 → 321,16
# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8)
# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9)
# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10)
# define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
# define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9)
# define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10)
# define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11)
# define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12)
# define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13)
 
#define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
#define R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
 
#define RADEON_CACHE_CNTL 0x1724
#define RADEON_CACHE_LINE 0x0f0c /* PCI */
#define RADEON_CAPABILITIES_ID 0x0f50 /* PCI */
346,6 → 356,7
# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
# define RADEON_TVCLK_TURNOFF (1 << 31)
#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
# define RADEON_PM_MODE_SEL (1 << 13)
# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
#define RADEON_CLR_CMP_CLR_3D 0x1a24
#define RADEON_CLR_CMP_CLR_DST 0x15c8
364,6 → 375,8
#define RADEON_CONFIG_APER_SIZE 0x0108
#define RADEON_CONFIG_BONDS 0x00e8
#define RADEON_CONFIG_CNTL 0x00e0
# define RADEON_CFG_VGA_RAM_EN (1 << 8)
# define RADEON_CFG_VGA_IO_DIS (1 << 9)
# define RADEON_CFG_ATI_REV_A11 (0 << 16)
# define RADEON_CFG_ATI_REV_A12 (1 << 16)
# define RADEON_CFG_ATI_REV_A13 (2 << 16)
421,6 → 434,7
# define RADEON_CRTC_CSYNC_EN (1 << 4)
# define RADEON_CRTC_ICON_EN (1 << 15)
# define RADEON_CRTC_CUR_EN (1 << 16)
# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17)
# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
# define RADEON_CRTC_CUR_MODE_SHIFT 20
# define RADEON_CRTC_CUR_MODE_MONO 0
508,6 → 522,8
# define RADEON_CRTC_TILE_EN (1 << 15)
# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)
# define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28)
# define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29)
 
#define R300_CRTC_TILE_X0_Y0 0x0350
#define R300_CRTC2_TILE_X0_Y0 0x0358
552,7 → 568,6
# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
#define RADEON_CRTC2_CRNT_FRAME 0x0314
#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
#define RADEON_CRTC2_STATUS 0x03fc
#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
994,6 → 1009,7
# define RADEON_FP_DETECT_MASK (1 << 4)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_FP2_DETECT_MASK (1 << 10)
# define RADEON_GUI_IDLE_MASK (1 << 19)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
1005,6 → 1021,8
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_FP2_DETECT_STAT (1 << 10)
# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
# define RADEON_GUI_IDLE_STAT (1 << 19)
# define RADEON_GUI_IDLE_STAT_ACK (1 << 19)
# define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
2833,6 → 2851,7
# define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24)
# define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24)
# define R200_TXFORMAT_ST_ROUTE_SHIFT 24
# define R200_TXFORMAT_LOOKUP_DISABLE (1 << 27)
# define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
# define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
# define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
/drivers/video/drm/radeon/radeon_ring.c
26,6 → 26,7
* Jerome Glisse
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
123,7 → 124,7
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, 64);
if (r) {
DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
radeon_ring_ib_execute(rdev, ib);
148,8 → 149,8
return 0;
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
167,7 → 168,7
r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
radeon_bo_unreserve(rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
return r;
}
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
192,22 → 193,26
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
int r;
struct radeon_bo *robj;
 
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
if (rdev->ib_pool.robj) {
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
// radeon_ib_bogus_cleanup(rdev);
robj = rdev->ib_pool.robj;
rdev->ib_pool.robj = NULL;
mutex_unlock(&rdev->ib_pool.mutex);
 
if (robj) {
r = radeon_bo_reserve(robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->ib_pool.robj);
radeon_bo_unpin(rdev->ib_pool.robj);
radeon_bo_unreserve(rdev->ib_pool.robj);
radeon_bo_kunmap(robj);
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
radeon_bo_unref(&rdev->ib_pool.robj);
rdev->ib_pool.robj = NULL;
radeon_bo_unref(&robj);
}
mutex_unlock(&rdev->ib_pool.mutex);
}
 
 
216,10 → 221,14
*/
void radeon_ring_free_size(struct radeon_device *rdev)
{
if (rdev->wb.enabled)
rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
else {
if (rdev->family >= CHIP_R600)
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
else
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
}
/* This works because ring_size is a power of 2 */
rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
rdev->cp.ring_free_dw -= rdev->cp.wptr;
229,7 → 238,7
}
}
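/* Worked example of the free-space math above: ring_size is in bytes and
 * rptr/wptr in dwords, so a 64 KiB ring holds 16384 dwords and
 * free = (rptr + 16384 - wptr) masked to the power-of-two ring size;
 * e.g. rptr = 100, wptr = 16380 gives 104 free dwords. The power-of-two
 * requirement lets wrap-around be a mask instead of a modulo. */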
 
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
int r;
 
236,7 → 245,6
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
mutex_lock(&rdev->cp.mutex);
while (ndw > (rdev->cp.ring_free_dw - 1)) {
radeon_ring_free_size(rdev);
if (ndw < rdev->cp.ring_free_dw) {
253,8 → 261,21
return 0;
}
 
void radeon_ring_unlock_commit(struct radeon_device *rdev)
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
int r;
 
mutex_lock(&rdev->cp.mutex);
r = radeon_ring_alloc(rdev, ndw);
if (r) {
mutex_unlock(&rdev->cp.mutex);
return r;
}
return 0;
}
 
void radeon_ring_commit(struct radeon_device *rdev)
{
unsigned count_dw_pad;
unsigned i;
 
266,6 → 287,11
}
DRM_MEMORYBARRIER();
radeon_cp_commit(rdev);
}
 
void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
radeon_ring_commit(rdev);
mutex_unlock(&rdev->cp.mutex);
}
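/* The alloc/commit vs lock/unlock_commit split above gives two call styles;
 * a sketch of the pattern for a caller that already owns cp.mutex:
 *
 *	r = radeon_ring_alloc(rdev, 64);
 *	if (!r) {
 *		... emit packets with radeon_ring_write() ...
 *		radeon_ring_commit(rdev);
 *	}
 */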
 
282,7 → 308,7
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->cp.ring_obj);
if (r) {
315,21 → 341,24
void radeon_ring_fini(struct radeon_device *rdev)
{
int r;
struct radeon_bo *ring_obj;
 
mutex_lock(&rdev->cp.mutex);
if (rdev->cp.ring_obj) {
r = radeon_bo_reserve(rdev->cp.ring_obj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->cp.ring_obj);
radeon_bo_unpin(rdev->cp.ring_obj);
radeon_bo_unreserve(rdev->cp.ring_obj);
}
radeon_bo_unref(&rdev->cp.ring_obj);
ring_obj = rdev->cp.ring_obj;
rdev->cp.ring = NULL;
rdev->cp.ring_obj = NULL;
}
mutex_unlock(&rdev->cp.mutex);
 
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(ring_obj);
radeon_bo_unpin(ring_obj);
radeon_bo_unreserve(ring_obj);
}
radeon_bo_unref(&ring_obj);
}
}
 
 
/*
354,8 → 383,37
return 0;
}
 
static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct radeon_device *rdev = node->info_ent->data;
struct radeon_ib *ib;
unsigned i;
 
mutex_lock(&rdev->ib_pool.mutex);
if (list_empty(&rdev->ib_pool.bogus_ib)) {
mutex_unlock(&rdev->ib_pool.mutex);
seq_printf(m, "no bogus IB recorded\n");
return 0;
}
ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
list_del_init(&ib->list);
mutex_unlock(&rdev->ib_pool.mutex);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
}
vfree(ib->ptr);
kfree(ib);
return 0;
}
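/* Note: each read of this debugfs file pops one recorded bogus IB off the
 * list and frees it, so dumping the history is destructive by design. */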
 
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
 
static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif
 
int radeon_debugfs_ib_init(struct radeon_device *rdev)
/drivers/video/drm/radeon/rdisplay.c
15,8 → 15,7
static cursor_t* __stdcall select_cursor(cursor_t *cursor);
static void __stdcall move_cursor(cursor_t *cursor, int x, int y);
 
extern void __attribute__((regparm(1))) destroy_cursor(cursor_t *cursor);
//extern void destroy_cursor(void);
extern void destroy_cursor(void);
 
void disable_mouse(void)
{};
33,8 → 32,8
 
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
 
r = radeon_bo_create(rdev, NULL, CURSOR_WIDTH*CURSOR_HEIGHT*4,
false, RADEON_GEM_DOMAIN_VRAM, &cursor->robj);
r = radeon_bo_create(rdev, CURSOR_WIDTH*CURSOR_HEIGHT*4,
PAGE_SIZE, false, RADEON_GEM_DOMAIN_VRAM, &cursor->robj);
 
if (unlikely(r != 0))
return r;
67,13 → 66,12
 
radeon_bo_kunmap(cursor->robj);
 
cursor->header.destroy = destroy_cursor;
// cursor->header.destroy = destroy_cursor;
 
return 0;
};
 
//void __attribute__((externally_visible)) fini_cursor(cursor_t *cursor)
void __attribute__((regparm(1))) destroy_cursor(cursor_t *cursor)
void fini_cursor(cursor_t *cursor)
{
list_del(&cursor->list);
radeon_bo_unpin(cursor->robj);
272,6 → 270,8
kfree(info);
}
 
#if 0
 
#define PACKET3_PAINT_MULTI 0x9A
# define R5XX_GMC_CLR_CMP_CNTL_DIS (1 << 28)
# define R5XX_GMC_WR_MSK_DIS (1 << 30)
2244,8 → 2244,8
obj_size += 32*4;
obj_size = ALIGN(obj_size, 256);
 
r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
&state_obj);
	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, &state_obj);
if (r) {
DRM_ERROR("r600 failed to allocate state buffer\n");
return r;
2414,3 → 2414,5
LEAVE();
return r;
}
 
#endif
/drivers/video/drm/radeon/rdisplay_kms.c
243,8 → 243,8
encoder = connector->encoder;
crtc = encoder->crtc;
 
fb = list_first_entry(&dev->mode_config.fb_kernel_list,
struct drm_framebuffer, filp_head);
// fb = list_first_entry(&dev->mode_config.fb_kernel_list,
// struct drm_framebuffer, filp_head);
 
// memcpy(con_edid, connector->edid_blob_ptr->data, 128);
 
309,6 → 309,8
static struct drm_connector* get_def_connector(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
 
struct drm_connector *def_connector = NULL;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
316,14 → 318,22
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
dbgprintf("CONNECTOR %x ID: %d status %d encoder %x\n", connector,
connector->base.id, connector->status, connector->encoder);
 
if( connector->status != connector_status_connected)
continue;
 
encoder = connector->encoder;
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if( encoder == NULL)
continue;
 
connector->encoder = encoder;
 
crtc = encoder->crtc;
dbgprintf("encoder %x crtc %x\n", encoder, crtc);
 
if(crtc == NULL)
continue;
 
/drivers/video/drm/radeon/rs400.c
28,6 → 28,7
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"
 
/* This files gather functions specifics to : rs400,rs480 */
53,13 → 54,7
rdev->mc.gtt_size = 32 * 1024 * 1024;
return;
}
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
/* FIXME: RS400 & RS480 seems to have issue with GART size
* if 4G of system memory (needs more testing) */
rdev->mc.gtt_size = 32 * 1024 * 1024;
DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n");
}
}
 
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
82,7 → 77,7
int r;
 
if (rdev->gart.table.ram.ptr) {
WARN(1, "RS400 GART already initialized.\n");
WARN(1, "RS400 GART already initialized\n");
return 0;
}
/* Check gart size */
202,11 → 197,14
 
void rs400_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rs400_gart_disable(rdev);
radeon_gart_table_ram_free(rdev);
radeon_gart_fini(rdev);
}
 
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
 
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
217,7 → 215,7
 
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4) |
0xc;
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
rdev->gart.table.ram.ptr[i] = entry;
return 0;
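/* Worked example of the PTE encoding above: for bus address 0x1_2345_6000,
 * lower_32_bits & PAGE_MASK = 0x23456000, the upper byte 0x01 shifted left
 * by 4 contributes 0x10, and the two enable bits add 0xc, so the
 * little-endian entry is 0x2345601c. */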
230,8 → 228,8
 
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
tmp = RREG32(0x0150);
if (tmp & (1 << 2)) {
tmp = RREG32(RADEON_MC_STATUS);
if (tmp & RADEON_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
241,13 → 239,11
 
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs400 ? */
r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs400_mc_wait_for_idle(rdev)) {
printk(KERN_WARNING "rs400: Failed to wait MC idle while "
"programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
"programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
}
}
 
263,7 → 259,9
r100_vram_init_sizes(rdev);
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
radeon_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
}
 
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
304,9 → 302,9
seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
tmp = RREG32_MC(0x100);
tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
tmp = RREG32(0x134);
tmp = RREG32(RS690_HDP_FB_LOCATION);
seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
} else {
tmp = RREG32(RADEON_AGP_BASE);
388,6 → 386,8
{
int r;
 
r100_set_common_regs(rdev);
 
rs400_mc_program(rdev);
/* Resume clock */
r300_clock_startup(rdev);
405,12 → 405,9
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
// r = r100_wb_init(rdev);
// if (r)
// dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
// r = r100_ib_init(rdev);
// if (r) {
// dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
433,6 → 430,8
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
/* restore some register to sane defaults */
r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
447,7 → 446,7
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
459,8 → 458,6
 
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize memory controller */
rs400_mc_init(rdev);
/* Fence driver */
/drivers/video/drm/radeon/rs600.c
37,6 → 37,7
*/
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"
 
146,6 → 147,78
}
}
 
void rs600_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
 
/* disable bus mastering */
tmp = PciRead16(rdev->pdev->bus, rdev->pdev->devfn, 0x4);
PciWrite16(rdev->pdev->bus, rdev->pdev->devfn, 0x4, tmp & 0xFFFB);
mdelay(1);
}
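/* The constants above come from PCI config space: offset 0x4 is the command
 * register and bit 2 is Bus Master Enable, so masking with 0xFFFB clears
 * exactly that bit. */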
 
int rs600_asic_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
u32 status, tmp;
int ret = 0;
 
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
/* Stops all mc clients */
rv515_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
WREG32(RADEON_CP_CSQ_CNTL, 0);
tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
// pci_save_state(rdev->pdev);
/* disable bus mastering */
rs600_bm_disable(rdev);
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset MC */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
// pci_restore_state(rdev->pdev);
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
rv515_mc_resume(rdev, &save);
return ret;
}
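/* The reset order above runs from consumers toward the memory controller:
 * GA+VAP first, then the CP, then the MC, re-reading RBBM_STATUS after each
 * step, while rv515_mc_stop()/rv515_mc_resume() bracket the sequence so
 * display scanout state survives the reset. */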
 
/*
* GART.
*/
158,7 → 231,7
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
 
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
 
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
172,7 → 245,7
int r;
 
if (rdev->gart.table.vram.robj) {
WARN(1, "RS600 GART already initialized.\n");
WARN(1, "RS600 GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
267,9 → 340,9
 
void rs600_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rs600_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
radeon_gart_fini(rdev);
}
 
#define R600_PTE_VALID (1 << 0)
292,7 → 365,8
return 0;
}
 
/*
#if 0
 
int rs600_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
303,7 → 377,7
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
310,10 → 384,15
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.crtc_vblank_int[0]) {
if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1);
}
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.crtc_vblank_int[1]) {
if (rdev->irq.crtc_vblank_int[1] ||
rdev->irq.pflip[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
328,36 → 407,41
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
return 0;
}
*/
 
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = ~C_000044_SW_INT;
uint32_t irq_mask = S_000044_SW_INT(1);
u32 tmp;
 
/* the interrupt works, but the status bit is permanently asserted */
if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
if (!rdev->irq.gui_idle_acked)
irq_mask |= S_000044_GUI_IDLE_STAT(1);
}
 
if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006534_D1MODE_VBLANK_STATUS,
S_006534_D1MODE_VBLANK_ACK(1));
}
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1));
}
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
} else {
*r500_disp_int = 0;
rdev->irq.stat_regs.r500.disp_int = 0;
}
 
if (irqs) {
368,15 → 452,14
 
void rs600_irq_disable(struct radeon_device *rdev)
{
u32 tmp;
 
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
mdelay(1);
rs600_irq_ack(rdev, &tmp);
rs600_irq_ack(rdev);
}
 
#endif
 
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
400,7 → 483,6
 
void rs600_gpu_init(struct radeon_device *rdev)
{
r100_hdp_reset(rdev);
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
411,8 → 493,8
{
u64 base;
 
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
422,13 → 504,38
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
}
 
void rs600_bandwidth_update(struct radeon_device *rdev)
{
/* FIXME: implement, should this be like rs690 ? */
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
/* FIXME: implement full support */
 
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
mode1 = &rdev->mode_info.crtcs[1]->base.mode;
 
rs690_line_buffer_adjust(rdev, mode0, mode1);
 
if (rdev->disp_priority == 2) {
d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
}
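/* When disp_priority == 2, the writes above force the display FIFO request
 * priority to "always on" for both CRTCs, trading raw GPU memory bandwidth
 * for guaranteed scanout under memory pressure. */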
 
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
501,12 → 608,9
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
// r = r100_wb_init(rdev);
// if (r)
// dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
// r = r100_ib_init(rdev);
// if (r) {
// dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
527,6 → 631,8
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
/* restore some register to sane defaults */
r100_restore_sanity(rdev);
/* BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
541,7 → 647,7
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
553,8 → 659,6
 
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize memory controller */
rs600_mc_init(rdev);
rs600_debugfs(rdev);
/drivers/video/drm/radeon/rs600d.h
178,6 → 178,52
#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_000074_MC_IND_DATA 0x00000000
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
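/* Pattern note for the S_/G_/C_ triples in this header: for a single-bit or
 * multi-bit field, S_xxx(v) shifts a value into place, G_xxx(r) extracts it,
 * and C_xxx is the complement mask used to clear the field first, e.g.:
 *
 *	reg = (reg & C_0000F0_SOFT_RESET_CP) | S_0000F0_SOFT_RESET_CP(1);
 *	cp  = G_0000F0_SOFT_RESET_CP(reg);
 */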
#define R_000134_HDP_FB_LOCATION 0x000134
#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
535,4 → 581,91
#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
 
#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
 
/* PLL regs */
#define GENERAL_PWRMGT 0x8
#define GLOBAL_PWRMGT_EN (1 << 0)
#define MOBILE_SU (1 << 2)
#define DYN_PWRMGT_SCLK_LENGTH 0xc
#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0)
#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4)
#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8)
#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12)
#define POWER_D1_SCLK_HILEN(x) ((x) << 16)
#define POWER_D1_SCLK_LOLEN(x) ((x) << 20)
#define STATIC_SCREEN_HILEN(x) ((x) << 24)
#define STATIC_SCREEN_LOLEN(x) ((x) << 28)
#define DYN_SCLK_VOL_CNTL 0xe
#define IO_CG_VOLTAGE_DROP (1 << 0)
#define VOLTAGE_DROP_SYNC (1 << 2)
#define VOLTAGE_DELAY_SEL(x) ((x) << 3)
#define HDP_DYN_CNTL 0x10
#define HDP_FORCEON (1 << 0)
#define MC_HOST_DYN_CNTL 0x1e
#define MC_HOST_FORCEON (1 << 0)
#define DYN_BACKBIAS_CNTL 0x29
#define IO_CG_BACKBIAS_EN (1 << 0)
 
/* mmreg */
#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0
#define PWRDN_WAIT_BUSY_OFF (1 << 0)
#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4)
#define PWRDN_WAIT_PPLL_OFF (1 << 8)
#define PWRUP_WAIT_PPLL_ON (1 << 12)
#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16)
#define PM_ASSERT_RESET (1 << 20)
#define PM_PWRDN_PPLL (1 << 24)
 
#endif
/drivers/video/drm/radeon/rs690.c
27,6 → 27,7
*/
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs690d.h"
 
47,8 → 48,6
 
static void rs690_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs690 ? */
r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs690_mc_wait_for_idle(rdev)) {
57,65 → 56,82
}
}
 
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
};
 
void rs690_pm_info(struct radeon_device *rdev)
{
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
void *ptr;
union igp_info *info;
uint16_t data_offset;
uint8_t frev, crev;
fixed20_12 tmp;
 
atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
&frev, &crev, &data_offset);
ptr = rdev->mode_info.atom_context->bios + data_offset;
info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
&frev, &crev, &data_offset)) {
info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
 
/* Get various system informations from bios */
switch (crev) {
case 1:
tmp.full = rfixed_const(100);
rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
tmp.full = dfixed_const(100);
rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
if (le16_to_cpu(info->info.usK8MemoryClock))
rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
else if (rdev->clock.default_mclk) {
rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
} else
rdev->pm.igp_system_mclk.full = dfixed_const(400);
rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
break;
case 2:
tmp.full = rfixed_const(100);
rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
tmp.full = dfixed_const(100);
rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
else if (rdev->clock.default_mclk)
rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
else
rdev->pm.igp_system_mclk.full = dfixed_const(66700);
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
break;
default:
tmp.full = rfixed_const(100);
/* We assume the slower possible clock ie worst case */
/* DDR 333Mhz */
rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
/* FIXME: system clock ? */
rdev->pm.igp_system_mclk.full = rfixed_const(100);
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
rdev->pm.igp_ht_link_width.full = rfixed_const(8);
rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
rdev->pm.igp_system_mclk.full = dfixed_const(200);
rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
break;
}
} else {
/* We assume the slowest possible clock, i.e. worst case */
rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
rdev->pm.igp_system_mclk.full = dfixed_const(200);
rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
}
/* Compute various bandwidth */
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
tmp.full = rfixed_const(4);
rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
tmp.full = dfixed_const(4);
rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
* = ht_clk * ht_width / 5
*/
tmp.full = rfixed_const(5);
rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
tmp.full = dfixed_const(5);
rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
rdev->pm.igp_ht_link_width);
rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
if (tmp.full < rdev->pm.max_bandwidth.full) {
/* HT link is a limiting factor */
rdev->pm.max_bandwidth.full = tmp.full;
123,15 → 139,14
/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
* = (sideport_clk * 14) / 10
*/
tmp.full = rfixed_const(14);
rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
tmp.full = rfixed_const(10);
rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
tmp.full = dfixed_const(14);
rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
tmp.full = dfixed_const(10);
rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
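/* Editor's sketch (not part of the driver): this revision renames the
 * rfixed_* helpers to dfixed_* throughout. The assumed semantics are 20.12
 * binary fixed point; the ex_* names below are hypothetical stand-ins for
 * the drm_fixed.h implementation, which may round differently (and a real
 * 32-bit kernel build would use do_div() for the 64-bit divide).
 */
typedef union ex_fixed20_12 {
	u32 full;			/* 20 integer bits, 12 fraction bits */
} ex_fixed20_12;

static inline u32 ex_const(u32 a)
{
	return a << 12;			/* integer -> 20.12 */
}

static inline u32 ex_trunc(ex_fixed20_12 a)
{
	return a.full >> 12;		/* 20.12 -> integer, truncating */
}

static inline ex_fixed20_12 ex_div(ex_fixed20_12 a, ex_fixed20_12 b)
{
	ex_fixed20_12 r;

	/* widen so the 12 fraction bits survive the divide */
	r.full = (u32)(((u64)a.full << 12) / b.full);
	return r;
}
/* With a.full = ex_const(30000) and b.full = ex_const(100), ex_div(a, b)
 * is 300.0 in 20.12 form -- the "divide the clock by 100" pattern used in
 * rs690_pm_info() above. */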
 
void rs690_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u64 base;
 
rs400_gart_adjust_size(rdev);
139,24 → 154,17
rdev->mc.vram_width = 128;
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
a.full = rfixed_const(16);
/* core_bandwidth = sclk(MHz) * 16 */
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
radeon_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
}
 
void rs690_line_buffer_adjust(struct radeon_device *rdev,
224,10 → 232,6
fixed20_12 a, b, c;
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
/* FIXME: detect IGP with sideport memory, I don't think there is any
* such product available
*/
bool sideport = false;
 
if (!crtc->base.enabled) {
/* FIXME: wouldn't it be better to set the priority mark to maximum */
235,20 → 239,20
return;
}
 
if (crtc->vsc.full > rfixed_const(2))
wm->num_line_pair.full = rfixed_const(2);
if (crtc->vsc.full > dfixed_const(2))
wm->num_line_pair.full = dfixed_const(2);
else
wm->num_line_pair.full = rfixed_const(1);
wm->num_line_pair.full = dfixed_const(1);
 
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
a.full = rfixed_div(b, c);
request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
b.full = dfixed_const(mode->crtc_hdisplay);
c.full = dfixed_const(256);
a.full = dfixed_div(b, c);
request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
 
/* Determine consumption rate
257,23 → 261,23
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ratio, defined as source/destination
*/
a.full = rfixed_const(mode->clock);
b.full = rfixed_const(1000);
a.full = rfixed_div(a, b);
pclk.full = rfixed_div(b, a);
a.full = dfixed_const(mode->clock);
b.full = dfixed_const(1000);
a.full = dfixed_div(a, b);
pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
b.full = rfixed_const(2);
b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
b.full = rfixed_mul(b, crtc->hsc);
c.full = rfixed_const(2);
b.full = rfixed_div(b, c);
consumption_time.full = rfixed_div(pclk, b);
b.full = dfixed_mul(b, crtc->hsc);
c.full = dfixed_const(2);
b.full = dfixed_div(b, c);
consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
a.full = rfixed_const(1);
wm->consumption_rate.full = rfixed_div(a, consumption_time);
a.full = dfixed_const(1);
wm->consumption_rate.full = dfixed_div(a, consumption_time);
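/* Editor's worked example (illustrative numbers): for mode->clock =
 * 108000 kHz, a becomes 108 and pclk = 1000 / 108 ~= 9.26 ns per pixel,
 * so with no scaling (RMX off) the consumption rate is
 * 1 / 9.26 ~= 0.108 pixels per ns. */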
 
 
/* Determine line time
281,8 → 285,8
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
a.full = rfixed_const(crtc->base.mode.crtc_htotal);
line_time.full = rfixed_mul(a, pclk);
a.full = dfixed_const(crtc->base.mode.crtc_htotal);
line_time.full = dfixed_mul(a, pclk);
 
/* Determine active time
* ActiveTime = time of active region of display within one line,
289,19 → 293,19
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
a.full = rfixed_const(crtc->base.mode.crtc_htotal);
b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->active_time.full = rfixed_mul(line_time, b);
wm->active_time.full = rfixed_div(wm->active_time, a);
a.full = dfixed_const(crtc->base.mode.crtc_htotal);
b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
wm->active_time.full = dfixed_mul(line_time, b);
wm->active_time.full = dfixed_div(wm->active_time, a);
 
/* Maximum bandwidth is the minimum bandwidth of all components */
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
if (sideport) {
if (rdev->mc.igp_sideport_enabled) {
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
read_delay_latency.full = rfixed_const(370 * 800 * 1000);
read_delay_latency.full = rfixed_div(read_delay_latency,
read_delay_latency.full = dfixed_const(370 * 800 * 1000);
read_delay_latency.full = dfixed_div(read_delay_latency,
rdev->pm.igp_sideport_mclk);
} else {
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
310,23 → 314,23
if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
rdev->pm.ht_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
read_delay_latency.full = rfixed_const(5000);
read_delay_latency.full = dfixed_const(5000);
}
 
/* sclk = system clock(ns) = 1000 / (max_bandwidth * 16) */
a.full = rfixed_const(16);
rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
a.full = rfixed_const(1000);
rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
a.full = dfixed_const(16);
rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
a.full = dfixed_const(1000);
rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(ns)
*/
a.full = rfixed_const(256 * 13);
chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
a.full = rfixed_const(10);
chunk_time.full = rfixed_div(chunk_time, a);
a.full = dfixed_const(256 * 13);
chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
a.full = dfixed_const(10);
chunk_time.full = dfixed_div(chunk_time, a);
 
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
336,13 → 340,13
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
if (rfixed_trunc(wm->num_line_pair) > 1) {
a.full = rfixed_const(3);
wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
if (dfixed_trunc(wm->num_line_pair) > 1) {
a.full = dfixed_const(3);
wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
a.full = rfixed_const(2);
wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
a.full = dfixed_const(2);
wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
}
 
356,34 → 360,34
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32 bits (4 bytes) */
wm->dbpp.full = rfixed_const(4 * 8);
wm->dbpp.full = dfixed_const(4 * 8);
 
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
a.full = dfixed_const(16);
wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
 
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
estimated_width.full = rfixed_div(estimated_width, consumption_time);
if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
wm->priority_mark.full = rfixed_const(10);
estimated_width.full = dfixed_div(estimated_width, consumption_time);
if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
wm->priority_mark.full = dfixed_const(10);
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
a.full = dfixed_const(16);
wm->priority_mark.full = dfixed_div(estimated_width, a);
wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
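/* Editor's worked example (illustrative numbers): for crtc_hdisplay =
 * 1920, priority_mark_max = ceil(1920 / 16) = 120. An estimated width of
 * 320 pixels then gives priority_mark = 120 - ceil(320 / 16) = 100, while
 * an estimate wider than the display collapses the mark to the constant
 * 10 used above. */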
395,9 → 399,13
struct rs690_watermark wm0;
struct rs690_watermark wm1;
u32 tmp;
u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
 
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
407,7 → 415,8
* modes if the user specifies HIGH for displaypriority
* option.
*/
if (rdev->disp_priority == 2) {
if ((rdev->disp_priority == 2) &&
((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
tmp &= C_000104_MC_DISP0R_INIT_LAT;
tmp &= C_000104_MC_DISP1R_INIT_LAT;
432,125 → 441,126
WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
 
if (mode0 && mode1) {
if (rfixed_trunc(wm0.dbpp) > 64)
a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
if (rfixed_trunc(wm1.dbpp) > 64)
b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
if (dfixed_trunc(wm1.dbpp) > 64)
b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
fill_rate.full = rfixed_div(wm0.sclk, a);
fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
b.full = rfixed_mul(b, wm0.active_time);
a.full = rfixed_mul(wm0.worst_case_latency,
b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
b.full = rfixed_const(16 * 1000);
priority_mark02.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
} else {
a.full = rfixed_mul(wm0.worst_case_latency,
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
b.full = rfixed_const(16 * 1000);
priority_mark02.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
b.full = rfixed_mul(b, wm1.active_time);
a.full = rfixed_mul(wm1.worst_case_latency,
b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
b.full = rfixed_const(16 * 1000);
priority_mark12.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
} else {
a.full = rfixed_mul(wm1.worst_case_latency,
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
b.full = rfixed_const(16 * 1000);
priority_mark12.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
if (rfixed_trunc(priority_mark02) < 0)
if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
if (rfixed_trunc(priority_mark12) < 0)
if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
}
} else if (mode0) {
if (rfixed_trunc(wm0.dbpp) > 64)
a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
fill_rate.full = rfixed_div(wm0.sclk, a);
fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
b.full = rfixed_mul(b, wm0.active_time);
a.full = rfixed_mul(wm0.worst_case_latency,
b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
b.full = rfixed_const(16 * 1000);
priority_mark02.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
} else {
a.full = rfixed_mul(wm0.worst_case_latency,
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
b.full = rfixed_const(16 * 1000);
priority_mark02.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
if (rfixed_trunc(priority_mark02) < 0)
if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
S_006D48_D2MODE_PRIORITY_A_OFF(1));
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
S_006D4C_D2MODE_PRIORITY_B_OFF(1));
} else {
if (rfixed_trunc(wm1.dbpp) > 64)
a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
} else if (mode1) {
if (dfixed_trunc(wm1.dbpp) > 64)
a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
fill_rate.full = rfixed_div(wm1.sclk, a);
fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
b.full = rfixed_mul(b, wm1.active_time);
a.full = rfixed_mul(wm1.worst_case_latency,
b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
b.full = rfixed_const(16 * 1000);
priority_mark12.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
} else {
a.full = rfixed_mul(wm1.worst_case_latency,
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
b.full = rfixed_const(16 * 1000);
priority_mark12.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
if (rfixed_trunc(priority_mark12) < 0)
if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
S_006548_D1MODE_PRIORITY_A_OFF(1));
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
S_00654C_D1MODE_PRIORITY_B_OFF(1));
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
}
 
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
 
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
611,12 → 621,9
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
// r = r100_wb_init(rdev);
// if (r)
// dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
// r = r100_ib_init(rdev);
// if (r) {
// dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
638,6 → 645,8
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
/* restore some registers to sane defaults */
r100_restore_sanity(rdev);
/* TODO: disable VGA, need to use VGA request */
/* BIOS*/
if (!radeon_get_bios(rdev)) {
653,7 → 662,7
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
665,8 → 674,6
 
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize memory controller */
rs690_mc_init(rdev);
rv515_debugfs(rdev);
/drivers/video/drm/radeon/rs690d.h
182,6 → 182,9
#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
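/* Editor's note: these headers follow the radeon S_/G_/C_ convention:
 * S_* shifts a field value into position, G_* extracts it from a register
 * word, and C_* is the AND-mask that clears the field. A typical
 * read-modify-write (illustrative) is:
 *
 *	tmp = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
 *	tmp &= C_006548_D1MODE_PRIORITY_A_ALWAYS_ON;	(clear the field)
 *	tmp |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);	(set it to 1)
 *	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, tmp);
 */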
/drivers/video/drm/radeon/rv515.c
29,6 → 29,7
#include "drmP.h"
#include "rv515d.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rv515_reg_safe.h"
 
67,13 → 68,13
ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(0x170C, 0));
radeon_ring_write(rdev, 1 << 31);
radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(0x42C8, 0));
radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(rdev, 0);
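/* Editor's note (assumed radeon CP packet semantics): PACKET0(reg, n)
 * builds a type-0 command packet whose header tells the CP to write the
 * following n + 1 ring dwords to consecutive registers starting at reg,
 * so each pair of radeon_ring_write() calls above is "header, value". */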
145,19 → 146,14
{
unsigned pipe_select_current, gb_pipe_select, tmp;
 
r100_hdp_reset(rdev);
r100_rb2d_reset(rdev);
 
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"reseting GPU. Bad things might happen.\n");
}
 
rv515_vga_render_disable(rdev);
 
r420_pipes_init(rdev);
gb_pipe_select = RREG32(0x402C);
tmp = RREG32(0x170C);
gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
tmp = RREG32(R300_DST_PIPE_CONFIG);
pipe_select_current = (tmp >> 2) & 3;
tmp = (1 << pipe_select_current) |
(((gb_pipe_select >> 8) & 0xF) << 4);
172,91 → 168,6
}
}
 
int rv515_ga_reset(struct radeon_device *rdev)
{
uint32_t tmp;
bool reinit_cp;
int i;
 
reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
for (i = 0; i < rdev->usec_timeout; i++) {
WREG32(CP_CSQ_MODE, 0);
WREG32(CP_CSQ_CNTL, 0);
WREG32(RBBM_SOFT_RESET, 0x32005);
(void)RREG32(RBBM_SOFT_RESET);
udelay(200);
WREG32(RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RBBM_STATUS);
if (tmp & ((1 << 20) | (1 << 26))) {
DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
/* GA still busy, soft reset it */
WREG32(0x429C, 0x200);
WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
WREG32(0x43E0, 0);
WREG32(0x43E4, 0);
WREG32(0x24AC, 0);
}
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
break;
}
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
if (reinit_cp) {
return r100_cp_init(rdev, rdev->cp.ring_size);
}
return 0;
}
DRM_UDELAY(1);
}
tmp = RREG32(RBBM_STATUS);
DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
return -1;
}
 
int rv515_gpu_reset(struct radeon_device *rdev)
{
uint32_t status;
 
/* reset order likely matters */
status = RREG32(RBBM_STATUS);
/* reset HDP */
r100_hdp_reset(rdev);
/* reset rb2d */
if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
r100_rb2d_reset(rdev);
}
/* reset GA */
if (status & ((1 << 20) | (1 << 26))) {
rv515_ga_reset(rdev);
}
/* reset CP */
status = RREG32(RBBM_STATUS);
if (status & (1 << 16)) {
r100_cp_reset(rdev);
}
/* Check if GPU is idle */
status = RREG32(RBBM_STATUS);
if (status & (1 << 31)) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
}
DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
return 0;
}
 
static void rv515_vram_get_type(struct radeon_device *rdev)
{
uint32_t tmp;
279,19 → 190,14
 
void rv515_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
 
rv515_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
radeon_vram_location(rdev, &rdev->mc, 0);
rdev->mc.gtt_base_align = 0;
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
radeon_update_bandwidth_info(rdev);
}
 
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
339,7 → 245,7
 
tmp = RREG32(0x2140);
seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
radeon_gpu_reset(rdev);
radeon_asic_reset(rdev);
tmp = RREG32(0x425C);
seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
return 0;
485,12 → 391,9
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
// r = r100_wb_init(rdev);
// if (r)
// dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
// r = r100_ib_init(rdev);
// if (r) {
// dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
515,6 → 418,8
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA, need to use VGA request */
/* restore some registers to sane defaults */
r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
529,7 → 434,7
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
540,8 → 445,6
return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
840,20 → 743,20
return;
}
 
if (crtc->vsc.full > rfixed_const(2))
wm->num_line_pair.full = rfixed_const(2);
if (crtc->vsc.full > dfixed_const(2))
wm->num_line_pair.full = dfixed_const(2);
else
wm->num_line_pair.full = rfixed_const(1);
wm->num_line_pair.full = dfixed_const(1);
 
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
a.full = rfixed_div(b, c);
request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
b.full = dfixed_const(mode->crtc_hdisplay);
c.full = dfixed_const(256);
a.full = dfixed_div(b, c);
request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
 
/* Determine consumption rate
862,23 → 765,23
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ratio, defined as source/destination
*/
a.full = rfixed_const(mode->clock);
b.full = rfixed_const(1000);
a.full = rfixed_div(a, b);
pclk.full = rfixed_div(b, a);
a.full = dfixed_const(mode->clock);
b.full = dfixed_const(1000);
a.full = dfixed_div(a, b);
pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
b.full = rfixed_const(2);
b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
b.full = rfixed_mul(b, crtc->hsc);
c.full = rfixed_const(2);
b.full = rfixed_div(b, c);
consumption_time.full = rfixed_div(pclk, b);
b.full = dfixed_mul(b, crtc->hsc);
c.full = dfixed_const(2);
b.full = dfixed_div(b, c);
consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
a.full = rfixed_const(1);
wm->consumption_rate.full = rfixed_div(a, consumption_time);
a.full = dfixed_const(1);
wm->consumption_rate.full = dfixed_div(a, consumption_time);
 
 
/* Determine line time
886,8 → 789,8
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
a.full = rfixed_const(crtc->base.mode.crtc_htotal);
line_time.full = rfixed_mul(a, pclk);
a.full = dfixed_const(crtc->base.mode.crtc_htotal);
line_time.full = dfixed_mul(a, pclk);
 
/* Determine active time
* ActiveTime = time of active region of display within one line,
894,10 → 797,10
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
a.full = rfixed_const(crtc->base.mode.crtc_htotal);
b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->active_time.full = rfixed_mul(line_time, b);
wm->active_time.full = rfixed_div(wm->active_time, a);
a.full = dfixed_const(crtc->base.mode.crtc_htotal);
b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
wm->active_time.full = dfixed_mul(line_time, b);
wm->active_time.full = dfixed_div(wm->active_time, a);
 
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
904,9 → 807,9
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(MHz)
*/
a.full = rfixed_const(600 * 1000);
chunk_time.full = rfixed_div(a, rdev->pm.sclk);
read_delay_latency.full = rfixed_const(1000);
a.full = dfixed_const(600 * 1000);
chunk_time.full = dfixed_div(a, rdev->pm.sclk);
read_delay_latency.full = dfixed_const(1000);
 
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
916,9 → 819,9
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
if (rfixed_trunc(wm->num_line_pair) > 1) {
a.full = rfixed_const(3);
wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
if (dfixed_trunc(wm->num_line_pair) > 1) {
a.full = dfixed_const(3);
wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
934,34 → 837,34
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32 bits (4 bytes) */
wm->dbpp.full = rfixed_const(2 * 16);
wm->dbpp.full = dfixed_const(2 * 16);
 
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
a.full = dfixed_const(16);
wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
 
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
estimated_width.full = rfixed_div(estimated_width, consumption_time);
if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
estimated_width.full = dfixed_div(estimated_width, consumption_time);
if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
wm->priority_mark.full = wm->priority_mark_max.full;
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
a.full = dfixed_const(16);
wm->priority_mark.full = dfixed_div(estimated_width, a);
wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
973,6 → 876,8
struct rv515_watermark wm0;
struct rv515_watermark wm1;
u32 tmp;
u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
 
990,121 → 895,126
WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
 
if (mode0 && mode1) {
if (rfixed_trunc(wm0.dbpp) > 64)
a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
if (rfixed_trunc(wm1.dbpp) > 64)
b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
if (dfixed_trunc(wm1.dbpp) > 64)
b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
fill_rate.full = rfixed_div(wm0.sclk, a);
fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
b.full = rfixed_mul(b, wm0.active_time);
a.full = rfixed_const(16);
b.full = rfixed_div(b, a);
a.full = rfixed_mul(wm0.worst_case_latency,
b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
a.full = rfixed_mul(wm0.worst_case_latency,
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
b.full = rfixed_const(16 * 1000);
priority_mark02.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
b.full = rfixed_mul(b, wm1.active_time);
a.full = rfixed_const(16);
b.full = rfixed_div(b, a);
a.full = rfixed_mul(wm1.worst_case_latency,
b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
a.full = rfixed_mul(wm1.worst_case_latency,
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
b.full = rfixed_const(16 * 1000);
priority_mark12.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
if (rfixed_trunc(priority_mark02) < 0)
if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
if (rfixed_trunc(priority_mark12) < 0)
if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
}
} else if (mode0) {
if (rfixed_trunc(wm0.dbpp) > 64)
a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
fill_rate.full = rfixed_div(wm0.sclk, a);
fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
b.full = rfixed_mul(b, wm0.active_time);
a.full = rfixed_const(16);
b.full = rfixed_div(b, a);
a.full = rfixed_mul(wm0.worst_case_latency,
b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
a.full = rfixed_mul(wm0.worst_case_latency,
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
b.full = rfixed_const(16);
priority_mark02.full = rfixed_div(a, b);
b.full = dfixed_const(16);
priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
if (rfixed_trunc(priority_mark02) < 0)
if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
} else {
if (rfixed_trunc(wm1.dbpp) > 64)
a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
} else if (mode1) {
if (dfixed_trunc(wm1.dbpp) > 64)
a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
fill_rate.full = rfixed_div(wm1.sclk, a);
fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
b.full = rfixed_mul(b, wm1.active_time);
a.full = rfixed_const(16);
b.full = rfixed_div(b, a);
a.full = rfixed_mul(wm1.worst_case_latency,
b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
a.full = rfixed_mul(wm1.worst_case_latency,
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
b.full = rfixed_const(16 * 1000);
priority_mark12.full = rfixed_div(a, b);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
if (rfixed_trunc(priority_mark12) < 0)
if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
}
 
WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
 
void rv515_bandwidth_update(struct radeon_device *rdev)
1113,6 → 1023,8
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
 
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
1122,7 → 1034,8
* modes if the user specifies HIGH for displaypriority
* option.
*/
if (rdev->disp_priority == 2) {
if ((rdev->disp_priority == 2) &&
(rdev->family == CHIP_RV515)) {
tmp = RREG32_MC(MC_MISC_LAT_TIMER);
tmp &= ~MC_DISP1R_INIT_LAT_MASK;
tmp &= ~MC_DISP0R_INIT_LAT_MASK;
/drivers/video/drm/radeon/rv515d.h
217,6 → 217,52
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
 
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
/drivers/video/drm/radeon/rv770.c
27,8 → 27,10
*/
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
39,6 → 41,7
 
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
 
 
/*
125,9 → 128,9
 
void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rv770_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
radeon_gart_fini(rdev);
}
 
 
172,7 → 175,10
WREG32((0x2c20 + j), 0x00000000);
WREG32((0x2c24 + j), 0x00000000);
}
WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
/* r7xx hw bug. Read from HDP_DEBUG1 rather
* than writing to HDP_REG_COHERENCY_FLUSH_CNTL
*/
tmp = RREG32(HDP_DEBUG1);
 
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
207,7 → 213,7
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
232,10 → 238,11
*/
void r700_cp_stop(struct radeon_device *rdev)
{
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
}
 
 
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
245,7 → 252,11
return -EINVAL;
 
r700_cp_stop(rdev);
WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
BUF_SWAP_32BIT |
#endif
RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
/* Reset cp */
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
448,6 → 459,49
return backend_map;
}
 
static void rv770_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer, mc_shared_chremap, tmp;
bool force_no_swizzle;
 
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}
 
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
case 2:
case 3:
if (force_no_swizzle)
mc_shared_chremap = 0x00fac688;
else
mc_shared_chremap = 0x00bbc298;
break;
}
 
if (rdev->family == CHIP_RV740)
tcp_chan_steer = 0x00ef2a60;
else
tcp_chan_steer = 0x00fac688;
 
WREG32(TCP_CHAN_STEER, tcp_chan_steer);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
 
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
603,10 → 657,11
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
 
gb_tiling_config |= GROUP_SIZE(0);
gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
rdev->config.rv770.tiling_group_size = 512;
else
rdev->config.rv770.tiling_group_size = 256;
 
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
638,19 → 693,25
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
 
rdev->config.rv770.tile_config = gb_tiling_config;
gb_tiling_config |= BACKEND_MAP(backend_map);
 
 
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
rv770_program_channel_remap(rdev);
 
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
 
num_qd_pipes =
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
862,9 → 923,95
 
}
 
static int rv770_vram_scratch_init(struct radeon_device *rdev)
{
int r;
u64 gpu_addr;
 
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->vram_scratch.robj);
if (r) {
return r;
}
}
 
r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->vram_scratch.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->vram_scratch.robj);
return r;
}
r = radeon_bo_kmap(rdev->vram_scratch.robj,
(void **)&rdev->vram_scratch.ptr);
if (r)
radeon_bo_unpin(rdev->vram_scratch.robj);
radeon_bo_unreserve(rdev->vram_scratch.robj);
 
return r;
}
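/* Editor's note: the scratch object follows the usual radeon buffer-object
 * lifecycle -- create, reserve, pin into VRAM, kmap -- with the kmap error
 * path undoing the pin before the reservation is dropped.
 * rv770_vram_scratch_fini() below unwinds the same steps in reverse:
 * kunmap, unpin, unreserve, unref. */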
 
static void rv770_vram_scratch_fini(struct radeon_device *rdev)
{
int r;
 
if (rdev->vram_scratch.robj == NULL) {
return;
}
r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->vram_scratch.robj);
radeon_bo_unpin(rdev->vram_scratch.robj);
radeon_bo_unreserve(rdev->vram_scratch.robj);
}
radeon_bo_unref(&rdev->vram_scratch.robj);
}
 
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
 
if (mc->mc_vram_size > 0xE0000000) {
/* leave room for at least 512M GTT */
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xE0000000;
mc->mc_vram_size = 0xE0000000;
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
size_af = 0xFFFFFFFF - mc->gtt_end + 1;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = size_bf;
mc->mc_vram_size = size_bf;
}
mc->vram_start = mc->gtt_start - mc->mc_vram_size;
} else {
if (mc->mc_vram_size > size_af) {
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = size_af;
mc->mc_vram_size = size_af;
}
mc->vram_start = mc->gtt_end;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
} else {
radeon_vram_location(rdev, &rdev->mc, 0);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, mc);
}
}
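/* Editor's worked example (illustrative numbers): with an AGP GTT at
 * [0xD0000000, 0xDFFFFFFF], size_bf = 0xD0000000 and size_af =
 * 0x20000000, so a 1 GiB VRAM aperture goes below the GTT at
 * vram_start = 0xD0000000 - 0x40000000 = 0x90000000. Had it not fit in
 * the larger region, mc_vram_size would have been clamped to that
 * region's size first. */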
 
int rv770_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
int chansize, numchan;
 
896,37 → 1043,25
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
 
return 0;
}
 
int rv770_gpu_reset(struct radeon_device *rdev)
{
/* FIXME: implement any rv770 specific bits */
return r600_gpu_reset(rdev);
}
 
static int rv770_startup(struct radeon_device *rdev)
{
int r;
 
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
 
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
943,6 → 1078,9
if (r)
return r;
}
r = rv770_vram_scratch_init(rdev);
if (r)
return r;
rv770_gpu_init(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
953,8 → 1091,7
r = r600_cp_resume(rdev);
if (r)
return r;
/* write-back buffers are not vital, so don't worry about failure */
// r600_wb_enable(rdev);
 
return 0;
}
 
974,13 → 1111,6
{
int r;
 
r = radeon_dummy_page_init(rdev);
if (r)
return r;
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
995,7 → 1125,7
if (r)
return r;
/* Post card if necessary */
if (!r600_card_posted(rdev)) {
if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
1009,11 → 1139,6
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev);
if (r)
return r;
/* Initialize power management */
radeon_pm_init(rdev);
/* Fence driver */
// r = radeon_fence_driver_init(rdev);
// if (r)
1062,6 → 1187,81
// }
// }
}
 
return 0;
}
 
static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
 
if (radeon_pcie_gen2 == 0)
return;
 
if (rdev->flags & RADEON_IS_IGP)
return;
 
if (!(rdev->flags & RADEON_IS_PCIE))
return;
 
/* x2 cards have a special sequence */
if (ASIC_IS_X2(rdev))
return;
 
/* advertise upconfig capability */
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= lanes | LC_RECONFIG_NOW |
LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
} else {
link_width_cntl |= LC_UPCONFIGURE_DIS;
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
 
tmp = RREG32(0x541c);
WREG32(0x541c, tmp | 0x8);
WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
link_cntl2 = RREG16(0x4088);
link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
link_cntl2 |= 0x2;
WREG16(0x4088, link_cntl2);
WREG32(MM_CFGREGS_CNTL, 0);
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
 
} else {
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
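/* Editor's summary, as read from the code above: advertise upconfig
 * capability, and only if the link partner has ever sent and supports
 * gen2 (the LC_OTHER_SIDE_* bits) program target link speed 0x2
 * (5.0 GT/s) into the link-control-2 word at 0x4088 via MM_CFGREGS_CNTL,
 * pulse LC_CLR_FAILED_SPD_CHANGE_CNT, then set LC_GEN2_EN_STRAP to start
 * the speed change; otherwise further upconfigure is disabled. */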
/drivers/video/drm/radeon/rv770d.h
122,6 → 122,11
#define GUI_ACTIVE (1<<31)
#define GRBM_STATUS2 0x8014
 
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x3FF0000
#define ASIC_T_SHIFT 16
 
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
128,10 → 133,12
#define HDP_NONSURFACE_SIZE 0x2C0C
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
#define HDP_DEBUG1 0x2F34
 
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
#define MC_SHARED_CHREMAP 0x2008
 
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
297,6 → 304,7
#define BILINEAR_PRECISION_8_BIT (1 << 31)
 
#define TCP_CNTL 0x9610
#define TCP_CHAN_STEER 0x9614
 
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
345,4 → 353,49
 
#define SRBM_STATUS 0x0E50
 
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
 
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_SHIFT 0
# define LC_LINK_WIDTH_MASK 0x7
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
# define LC_LINK_WIDTH_RD_SHIFT 4
# define LC_LINK_WIDTH_RD_MASK 0x70
# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
# define LC_RECONFIG_NOW (1 << 8)
# define LC_RENEGOTIATION_SUPPORT (1 << 9)
# define LC_RENEGOTIATE_EN (1 << 10)
# define LC_SHORT_RECONFIG_EN (1 << 11)
# define LC_UPCONFIGURE_SUPPORT (1 << 12)
# define LC_UPCONFIGURE_DIS (1 << 13)
#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
# define LC_GEN2_EN_STRAP (1 << 0)
# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
# define LC_CURRENT_DATA_RATE (1 << 11)
# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
#define MM_CFGREGS_CNTL 0x544c
# define MM_WR_TO_CFG_EN (1 << 3)
#define LINK_CNTL2 0x88 /* F0 */
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
 
#endif