Subversion Repositories Kolibri OS

Compare Revisions

Rev 1403 → Rev 1404

/drivers/video/drm/drm_crtc.c
158,6 → 158,7
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
};
 
static struct drm_prop_enum_list drm_encoder_enum_list[] =
/drivers/video/drm/drm_crtc_helper.c
216,7 → 216,7
EXPORT_SYMBOL(drm_helper_crtc_in_use);
 
/**
* drm_disable_unused_functions - disable unused objects
* drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
*
* LOCKING:
572,7 → 572,7
struct drm_crtc *tmp;
int crtc_mask = 1;
 
// WARN(!crtc, "checking null crtc?");
WARN(!crtc, "checking null crtc?");
 
dev = crtc->dev;
 
702,7 → 702,7
if (encoder->crtc != crtc)
continue;
 
DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
mode->name, mode->base.id);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
1021,9 → 1021,9
int count = 0;
 
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
// drm_helper_disable_unused_functions(dev);
 
drm_fb_helper_parse_command_line(dev);
// drm_fb_helper_parse_command_line(dev);
 
count = drm_helper_probe_connector_modes(dev,
dev->mode_config.max_width,
1032,7 → 1032,8
/*
* we shouldn't end up with no modes here.
*/
// WARN(!count, "Connected connector with 0 modes\n");
if (count == 0)
printk(KERN_INFO "No connectors reported connected with modes\n");
 
drm_setup_crtcs(dev);
 
1162,6 → 1163,9
int drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1174,7 → 1178,26
 
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
 
/* Turn off outputs that were already powered off */
if (drm_helper_choose_crtc_dpms(crtc)) {
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 
if(encoder->crtc != crtc)
continue;
 
encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
 
crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
}
}
/* disable the unused connectors while restoring the modesetting */
drm_helper_disable_unused_functions(dev);
return 0;
/drivers/video/drm/drm_dp_i2c_helper.c
0,0 → 1,209
/*
* Copyright © 2009 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
 
#include <linux/kernel.h>
#include <linux/module.h>
//#include <linux/delay.h>
//#include <linux/slab.h>
//#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#include "drm_dp_helper.h"
#include "drmP.h"
 
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
 
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
}
 
/*
* I2C over AUX CH
*/
 
/*
* Send the address. If the I2C link is running, this 'restarts'
* the connection with the new address; this is used for doing
* a write followed by a read (as needed for DDC)
*/
static int
i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int mode = MODE_I2C_START;
int ret;
 
if (reading)
mode |= MODE_I2C_READ;
else
mode |= MODE_I2C_WRITE;
algo_data->address = address;
algo_data->running = true;
ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
return ret;
}
 
/*
* Stop the I2C transaction. This closes out the link, sending
* a bare address packet with the MOT bit turned off
*/
static void
i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int mode = MODE_I2C_STOP;
 
if (reading)
mode |= MODE_I2C_READ;
else
mode |= MODE_I2C_WRITE;
if (algo_data->running) {
(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
algo_data->running = false;
}
}
 
/*
* Write a single byte to the current I2C address. The
* I2C link must be running or this returns -EIO
*/
static int
i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
 
if (!algo_data->running)
return -EIO;
 
ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
return ret;
}
 
/*
* Read a single byte from the current I2C address. The
* I2C link must be running or this returns -EIO
*/
static int
i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
 
if (!algo_data->running)
return -EIO;
 
ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
return ret;
}
 
static int
i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
{
int ret = 0;
bool reading = false;
int m;
int b;
 
for (m = 0; m < num; m++) {
u16 len = msgs[m].len;
u8 *buf = msgs[m].buf;
reading = (msgs[m].flags & I2C_M_RD) != 0;
ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
if (ret < 0)
break;
if (reading) {
for (b = 0; b < len; b++) {
ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
if (ret < 0)
break;
}
} else {
for (b = 0; b < len; b++) {
ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
if (ret < 0)
break;
}
}
if (ret < 0)
break;
}
if (ret >= 0)
ret = num;
i2c_algo_dp_aux_stop(adapter, reading);
DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
return ret;
}
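A usage sketch (illustrative, not part of this revision): a DDC-style EDID fetch through this algorithm is a one-byte offset write followed by a 128-byte read; i2c_transfer() dispatches both messages to i2c_algo_dp_aux_xfer() above. The helper name is hypothetical.

/* Sketch: fetch the first EDID block over DP AUX. Assumes 'adapter'
 * was registered with i2c_dp_aux_add_bus() below. */
static int dp_aux_read_edid_block(struct i2c_adapter *adapter, u8 *block)
{
	u8 offset = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = block },
	};

	/* i2c_transfer() returns the number of messages on success */
	return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -EIO;
}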
 
static u32
i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR;
}
 
static const struct i2c_algorithm i2c_dp_aux_algo = {
.master_xfer = i2c_algo_dp_aux_xfer,
.functionality = i2c_algo_dp_aux_functionality,
};
 
static void
i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
 
}
 
static int
i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
{
adapter->algo = &i2c_dp_aux_algo;
adapter->retries = 3;
i2c_dp_aux_reset_bus(adapter);
return 0;
}
 
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
 
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
// error = i2c_add_adapter(adapter);
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
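For context, a driver hooks its hardware AUX routine in through the aux_ch callback of i2c_algo_dp_aux_data before calling i2c_dp_aux_add_bus(). A minimal sketch; my_aux_ch stands in for the driver's real transaction routine:

/* Sketch: wiring a driver AUX channel into this helper */
static int my_aux_ch(struct i2c_adapter *adapter, int mode,
		     uint8_t write_byte, uint8_t *read_byte)
{
	/* driver-specific hardware access goes here */
	return -EIO;
}

static struct i2c_algo_dp_aux_data my_aux_data = {
	.running = false,
	.address = 0,
	.aux_ch  = my_aux_ch,
};

static struct i2c_adapter my_aux_adapter;

static int my_register_aux(void)
{
	my_aux_adapter.algo_data = &my_aux_data;
	return i2c_dp_aux_add_bus(&my_aux_adapter);
}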
/drivers/video/drm/drm_edid.c
633,8 → 633,7
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
printk(KERN_WARNING "integrated sync not supported\n");
return NULL;
printk(KERN_WARNING "composite sync not supported\n");
}
 
/* it is incorrect if hsync/vsync width is zero */
911,23 → 910,27
struct drm_device *dev = connector->dev;
struct cvt_timing *cvt;
const int rates[] = { 60, 85, 75, 60, 50 };
const u8 empty[3] = { 0, 0, 0 };
 
for (i = 0; i < 4; i++) {
int width, height;
int uninitialized_var(width), height;
cvt = &(timing->data.other_data.data.cvt[i]);
 
height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
switch (cvt->code[1] & 0xc0) {
if (!memcmp(cvt->code, empty, 3))
continue;
 
height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
switch (cvt->code[1] & 0x0c) {
case 0x00:
width = height * 4 / 3;
break;
case 0x40:
case 0x04:
width = height * 16 / 9;
break;
case 0x80:
case 0x08:
width = height * 16 / 10;
break;
case 0xc0:
case 0x0c:
width = height * 15 / 9;
break;
}
/drivers/video/drm/drm_fb_helper.c
175,7 → 175,7
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
392,11 → 392,10
return -EINVAL;
 
/* Need to resize the fb object !!! */
if (var->xres > fb->width || var->yres > fb->height) {
DRM_ERROR("Requested width/height is greater than current fb "
"object %dx%d > %dx%d\n", var->xres, var->yres,
fb->width, fb->height);
DRM_ERROR("Need resizing code.\n");
if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
"object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
 
/drivers/video/drm/drm_mm.c
355,7 → 355,7
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
if (size < best_size) {
if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
405,7 → 405,7
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
if (size < best_size) {
if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
/drivers/video/drm/drm_modes.c
1,9 → 1,4
/*
* The list_sort function is (presumably) licensed under the GPL (see the
* top level "COPYING" file for details).
*
* The remainder of this file is:
*
* Copyright © 1997-2003 by The XFree86 Project, Inc.
* Copyright © 2007 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
36,6 → 31,7
*/
 
#include <linux/list.h>
#include <linux/list_sort.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
855,6 → 851,7
 
/**
* drm_mode_compare - compare modes for favorability
* @priv: unused
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
868,7 → 865,7
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
{
struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
885,85 → 882,6
return diff;
}
 
/* FIXME: what we don't have a list sort function? */
/* list sort from Mark J Roberts (mjr@znex.org) */
void list_sort(struct list_head *head,
int (*cmp)(struct list_head *a, struct list_head *b))
{
struct list_head *p, *q, *e, *list, *tail, *oldhead;
int insize, nmerges, psize, qsize, i;
 
list = head->next;
list_del(head);
insize = 1;
for (;;) {
p = oldhead = list;
list = tail = NULL;
nmerges = 0;
 
while (p) {
nmerges++;
q = p;
psize = 0;
for (i = 0; i < insize; i++) {
psize++;
q = q->next == oldhead ? NULL : q->next;
if (!q)
break;
}
 
qsize = insize;
while (psize > 0 || (qsize > 0 && q)) {
if (!psize) {
e = q;
q = q->next;
qsize--;
if (q == oldhead)
q = NULL;
} else if (!qsize || !q) {
e = p;
p = p->next;
psize--;
if (p == oldhead)
p = NULL;
} else if (cmp(p, q) <= 0) {
e = p;
p = p->next;
psize--;
if (p == oldhead)
p = NULL;
} else {
e = q;
q = q->next;
qsize--;
if (q == oldhead)
q = NULL;
}
if (tail)
tail->next = e;
else
list = e;
e->prev = tail;
tail = e;
}
p = q;
}
 
tail->next = list;
list->prev = tail;
 
if (nmerges <= 1)
break;
 
insize *= 2;
}
 
head->next = list;
head->prev = list->prev;
list->prev->next = head;
list->prev = head;
}
 
/**
* drm_mode_sort - sort mode list
* @mode_list: list to sort
975,7 → 893,7
*/
void drm_mode_sort(struct list_head *mode_list)
{
list_sort(mode_list, drm_mode_compare);
list_sort(NULL, mode_list, drm_mode_compare);
}
EXPORT_SYMBOL(drm_mode_sort);
 
/drivers/video/drm/idr.c
34,7 → 34,56
#include "drmP.h"
#include "drm_crtc.h"
 
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
 
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
 
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found:
return result + __ffs(tmp);
}
 
int find_next_bit(const unsigned long *addr, int size, int offset)
{
const unsigned long *p = addr + (offset >> 5);
int set = 0, bit = offset & 31, res;
 
if (bit)
{
/*
* Look for nonzero in the first 32 bits:
*/
__asm__("bsfl %1,%0\n\t"
"jne 1f\n\t"
"movl $32, %0\n"
"1:"
: "=r" (set)
: "r" (*p >> bit));
if (set < (32 - bit))
return set + offset;
set = 32 - bit;
p++;
}
/*
* No set bit yet, search remaining full words for a bit
*/
res = find_first_bit (p, size - 32 * (p - addr));
return (offset + set + res);
}
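A usage sketch (illustrative): the two helpers compose into the usual set-bit iteration, an open-coded for_each_set_bit:

static void walk_set_bits(const unsigned long *map, int size)
{
	int bit;

	for (bit = find_first_bit(map, size);
	     bit < size;
	     bit = find_next_bit(map, size, bit + 1))
		dbgprintf("bit %d set\n", bit);
}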
 
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
#define rcu_dereference(p) ({ \
/drivers/video/drm/includes/errno.h
File deleted
/drivers/video/drm/includes/pci.h
File deleted
/drivers/video/drm/includes/linux/kernel.h
78,6 → 78,22
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2; })
 
/*
* ..and if you can't take the strict
* types, you can specify one yourself.
*
* Or not use min/max/clamp at all, of course.
*/
#define min_t(type, x, y) ({ \
type __min1 = (x); \
type __min2 = (y); \
__min1 < __min2 ? __min1: __min2; })
 
#define max_t(type, x, y) ({ \
type __max1 = (x); \
type __max2 = (y); \
__max1 > __max2 ? __max1: __max2; })
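For context, the typed variants force both operands to one explicit type before comparing, which avoids signed/unsigned pitfalls; the panel_vcc_delay clamp added in radeon_combios.c below is a concrete use:

	lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);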
 
/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
/drivers/video/drm/includes/linux/sched.h
1,0 → 0,0
/* stub */
 
static inline void mdelay(unsigned long time)
{
time /= 10;
if(!time) time = 1;
 
__asm__ __volatile__ (
"call *__imp__Delay"
::"b" (time));
__asm__ __volatile__ (
"":::"ebx");
 
};
 
static inline void udelay(unsigned long delay)
{
if(!delay) delay++;
delay*= 500;
 
while(delay--)
{
__asm__ __volatile__(
"xorl %%eax, %%eax \n\t"
"cpuid"
:::"eax","ebx","ecx","edx" );
}
}
 
/drivers/video/drm/list_sort.c
0,0 → 1,101
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/list.h>
 
/**
* list_sort - sort a list.
* @priv: private data, passed to @cmp
* @head: the list to sort
* @cmp: the elements comparison function
*
* This function has been implemented by Mark J Roberts <mjr@znex.org>. It
* implements "merge sort" which has O(nlog(n)) complexity. The list is sorted
* in ascending order.
*
* The comparison function @cmp is supposed to return a negative value if @a is
* less than @b, and a positive value if @a is greater than @b. If @a and @b
* are equivalent, then it does not matter what this function returns.
*/
void list_sort(void *priv, struct list_head *head,
int (*cmp)(void *priv, struct list_head *a,
struct list_head *b))
{
struct list_head *p, *q, *e, *list, *tail, *oldhead;
int insize, nmerges, psize, qsize, i;
 
if (list_empty(head))
return;
 
list = head->next;
list_del(head);
insize = 1;
for (;;) {
p = oldhead = list;
list = tail = NULL;
nmerges = 0;
 
while (p) {
nmerges++;
q = p;
psize = 0;
for (i = 0; i < insize; i++) {
psize++;
q = q->next == oldhead ? NULL : q->next;
if (!q)
break;
}
 
qsize = insize;
while (psize > 0 || (qsize > 0 && q)) {
if (!psize) {
e = q;
q = q->next;
qsize--;
if (q == oldhead)
q = NULL;
} else if (!qsize || !q) {
e = p;
p = p->next;
psize--;
if (p == oldhead)
p = NULL;
} else if (cmp(priv, p, q) <= 0) {
e = p;
p = p->next;
psize--;
if (p == oldhead)
p = NULL;
} else {
e = q;
q = q->next;
qsize--;
if (q == oldhead)
q = NULL;
}
if (tail)
tail->next = e;
else
list = e;
e->prev = tail;
tail = e;
}
p = q;
}
 
tail->next = list;
list->prev = tail;
 
if (nmerges <= 1)
break;
 
insize *= 2;
}
 
head->next = list;
head->prev = list->prev;
list->prev->next = head;
list->prev = head;
}
 
EXPORT_SYMBOL(list_sort);
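A caller sketch (hypothetical 'item' type): the cmp callback follows the contract documented above, returning a negative value when its first argument sorts first.

struct item {
	int key;
	struct list_head head;
};

static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = list_entry(a, struct item, head);
	struct item *ib = list_entry(b, struct item, head);

	return ia->key - ib->key;
}

/* list_sort(NULL, &items, item_cmp); leaves 'items' in ascending order */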
/drivers/video/drm/radeon/radeon_cursor.c
File deleted
/drivers/video/drm/radeon/atombios_crtc.c
305,7 → 305,6
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
 
printk("executing set crtc dtd timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
345,7 → 344,6
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
 
printk("executing set crtc timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
407,59 → 405,57
}
}
 
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
union adjust_pixel_clock {
ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
};
 
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct radeon_pll *pll)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
uint8_t frev, crev;
int index;
SET_PIXEL_CLOCK_PS_ALLOCATION args;
PIXEL_CLOCK_PARAMETERS *spc1_ptr;
PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
uint32_t pll_clock = mode->clock;
uint32_t adjusted_clock;
uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int pll_flags = 0;
u32 adjusted_clock = mode->clock;
 
memset(&args, 0, sizeof(args));
/* reset the pll flags */
pll->flags = 0;
 
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
RADEON_PLL_PREFER_CLOSEST_LOWER);
 
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
} else {
pll_flags |= RADEON_PLL_LEGACY;
pll->flags |= RADEON_PLL_LEGACY;
 
if (mode->clock > 200000) /* range limits??? */
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
if (!ASIC_IS_AVIVO(rdev)) {
if (encoder->encoder_type !=
DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type ==
DRM_MODE_ENCODER_LVDS)
pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_encoder = to_radeon_encoder(encoder);
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
pll->flags |= RADEON_PLL_USE_REF_DIV;
}
radeon_encoder = to_radeon_encoder(encoder);
break;
}
}
469,46 → 465,101
* special hw requirements.
*/
if (ASIC_IS_DCE3(rdev)) {
ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
union adjust_pixel_clock args;
struct radeon_encoder_atom_dig *dig;
u8 frev, crev;
int index;
 
if (!encoder)
return;
if (!radeon_encoder->enc_priv)
return adjusted_clock;
dig = radeon_encoder->enc_priv;
 
memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
 
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
memset(&args, 0, sizeof(args));
 
switch (frev) {
case 1:
switch (crev) {
case 1:
case 2:
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
 
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&adjust_pll_args);
adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
} else {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (ASIC_IS_AVIVO(rdev) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
adjusted_clock = mode->clock * 2;
else
adjusted_clock = mode->clock;
index, (uint32_t *)&args);
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return adjusted_clock;
}
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return adjusted_clock;
}
}
return adjusted_clock;
}
 
union set_pixel_clock {
SET_PIXEL_CLOCK_PS_ALLOCATION base;
PIXEL_CLOCK_PARAMETERS v1;
PIXEL_CLOCK_PARAMETERS_V2 v2;
PIXEL_CLOCK_PARAMETERS_V3 v3;
};
 
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
u8 frev, crev;
int index;
union set_pixel_clock args;
u32 pll_clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
u32 adjusted_clock;
 
memset(&args, 0, sizeof(args));
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
break;
}
}
 
if (!radeon_encoder)
return;
 
if (radeon_crtc->crtc_id == 0)
pll = &rdev->clock.p1pll;
else
pll = &rdev->clock.p2pll;
 
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
 
if (ASIC_IS_AVIVO(rdev)) {
if (radeon_new_pll)
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div,
&ref_div, &post_div, pll_flags);
&ref_div, &post_div);
else
radeon_compute_pll(pll, adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div,
&ref_div, &post_div, pll_flags);
&ref_div, &post_div);
} else
radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div, pll_flags);
&ref_div, &post_div);
 
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
518,45 → 569,38
case 1:
switch (crev) {
case 1:
spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
spc1_ptr->ucFracFbDiv = frac_fb_div;
spc1_ptr->ucPostDiv = post_div;
spc1_ptr->ucPpll =
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.usRefDiv = cpu_to_le16(ref_div);
args.v1.usFbDiv = cpu_to_le16(fb_div);
args.v1.ucFracFbDiv = frac_fb_div;
args.v1.ucPostDiv = post_div;
args.v1.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
spc1_ptr->ucRefDivSrc = 1;
args.v1.ucCRTC = radeon_crtc->crtc_id;
args.v1.ucRefDivSrc = 1;
break;
case 2:
spc2_ptr =
(PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
spc2_ptr->ucFracFbDiv = frac_fb_div;
spc2_ptr->ucPostDiv = post_div;
spc2_ptr->ucPpll =
args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v2.usRefDiv = cpu_to_le16(ref_div);
args.v2.usFbDiv = cpu_to_le16(fb_div);
args.v2.ucFracFbDiv = frac_fb_div;
args.v2.ucPostDiv = post_div;
args.v2.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
spc2_ptr->ucRefDivSrc = 1;
args.v2.ucCRTC = radeon_crtc->crtc_id;
args.v2.ucRefDivSrc = 1;
break;
case 3:
if (!encoder)
return;
spc3_ptr =
(PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
spc3_ptr->ucFracFbDiv = frac_fb_div;
spc3_ptr->ucPostDiv = post_div;
spc3_ptr->ucPpll =
args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v3.usRefDiv = cpu_to_le16(ref_div);
args.v3.usFbDiv = cpu_to_le16(fb_div);
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
args.v3.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
spc3_ptr->ucEncoderMode =
args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
args.v3.ucTransmitterId = radeon_encoder->encoder_id;
args.v3.ucEncoderMode =
atombios_get_encoder_mode(encoder);
break;
default:
569,11 → 613,10
return;
}
 
printk("executing set pll\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
ENTER();
598,15 → 641,18
 
/* Pin framebuffer & get tilling informations */
obj = radeon_fb->obj;
obj_priv = obj->driver_private;
rbo = obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
return -EINVAL;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
 
// if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
// return -EINVAL;
// }
 
fb_location = rdev->mc.vram_location;
tiling_flags = 0;
 
switch (crtc->fb->bits_per_pixel) {
case 8:
fb_format =
687,10 → 733,15
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
 
// if (old_fb && old_fb != crtc->fb) {
// radeon_fb = to_radeon_framebuffer(old_fb);
// radeon_gem_object_unpin(radeon_fb->obj);
// }
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
rbo = radeon_fb->obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
 
/* Bytes per pixel may have changed */
radeon_bandwidth_update(rdev);
700,6 → 751,42
return 0;
}
 
int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
 
if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_set_base(crtc, x, y, old_fb);
else
return radeon_crtc_set_base(crtc, x, y, old_fb);
}
 
/* properly set additional regs when using atombios */
static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
u32 disp_merge_cntl;
 
switch (radeon_crtc->crtc_id) {
case 0:
disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
break;
case 1:
disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
break;
}
}
 
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
721,8 → 808,8
else {
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_set_surface(crtc);
atombios_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_fixup(crtc);
}
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
740,8 → 827,8
 
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
atombios_lock_crtc(crtc, 1);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
atombios_lock_crtc(crtc, 1);
}
 
static void atombios_crtc_commit(struct drm_crtc *crtc)
/drivers/video/drm/radeon/atombios_dp.c
332,11 → 332,13
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int retry_count = 0;
 
memset(&args, 0, sizeof(args));
 
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
retry:
memcpy(base, req_bytes, num_bytes);
 
args.lpAuxRequest = 0;
347,10 → 349,12
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
if (args.ucReplyStatus) {
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
if (args.ucReplyStatus && !args.ucDataOutLen) {
if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
goto retry;
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
chan->rec.i2c_id, args.ucReplyStatus);
chan->rec.i2c_id, args.ucReplyStatus, retry_count);
return false;
}
 
/drivers/video/drm/radeon/cmdline.c
1,5 → 1,4
 
#include <stdint.h>
#include <drm/drmP.h>
#include <drm.h>
#include <drm_mm.h>
24,7 → 23,7
}
}
 
char* parse_mode(char *p, mode_t *mode)
char* parse_mode(char *p, videomode_t *mode)
{
char c;
 
63,7 → 62,7
return p;
};
 
void parse_cmdline(char *cmdline, mode_t *mode, char *log, int *kms)
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms)
{
char *p = cmdline;
 
/drivers/video/drm/radeon/display.h
20,7 → 20,7
uint32_t hot_y;
 
struct list_head list;
struct radeon_object *robj;
struct radeon_bo *robj;
}cursor_t;
 
#define CURSOR_WIDTH 64
/drivers/video/drm/radeon/makefile
1,23 → 1,30
 
CC = gcc
FASM = e:/fasm/fasm.exe
CFLAGS = -c -O2 -fomit-frame-pointer -fno-builtin-printf
LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0 --file-alignment 512 --section-alignment 4096
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
DRM_TOPDIR = $(CURDIR)/..
DRM_INCLUDES = $(DRM_TOPDIR)/includes
 
INCLUDES = -I$(DRM_INCLUDES) -I$(DRM_INCLUDES)/drm \
-I$(DRM_INCLUDES)/linux -I$(DRM_INCLUDES)/asm
 
CFLAGS = -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf
 
LIBPATH:= .
 
LIBS:= -ldrv -lcore
 
LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0\
--file-alignment 512 --section-alignment 4096
 
 
NAME:= atikms
 
INCLUDES = -I$(DRM_INCLUDES) -I$(DRM_INCLUDES)/linux -I$(DRM_INCLUDES)/drm
 
HFILES:= $(DRM_INCLUDES)/linux/types.h \
$(DRM_INCLUDES)/linux/list.h \
$(DRM_INCLUDES)/pci.h \
$(DRM_INCLUDES)/linux/pci.h \
$(DRM_INCLUDES)/drm/drm.h \
$(DRM_INCLUDES)/drm/drmP.h \
$(DRM_INCLUDES)/drm/drm_edid.h \
36,10 → 43,11
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_dp_i2c_helper.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
$(DRM_TOPDIR)/idr.c \
radeon_gem.c \
$(DRM_TOPDIR)/list_sort.c \
radeon_device.c \
radeon_clocks.c \
radeon_i2c.c \
47,6 → 55,7
radeon_atombios.c \
radeon_agp.c \
atombios_crtc.c \
atombios_dp.c \
radeon_encoders.c \
radeon_connectors.c \
radeon_bios.c \
55,9 → 64,10
radeon_legacy_encoders.c \
radeon_legacy_tv.c \
radeon_display.c \
radeon_object.c \
radeon_gart.c \
radeon_ring.c \
radeon_object_kos.c \
radeon_gem.c \
r100.c \
r200.c \
r300.c \
65,6 → 75,8
rv515.c \
r520.c \
r600.c \
r600_audio.c \
r600_hdmi.c \
rs400.c \
rs600.c \
rs690.c \
92,7 → 104,7
 
 
%.o : %.c $(HFILES) Makefile
$(CC) $(CFLAGS) $(DEFINES) $(INCLUDES) -o $@ -c $<
$(CC) $(CFLAGS) $(DEFINES) -o $@ $<
 
%.o : %.S $(HFILES) Makefile
as -o $@ $<
/drivers/video/drm/radeon/pci.c
345,7 → 345,7
 
hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE);
 
dev = (pci_dev_t*)kzalloc(sizeof(dev_t), 0);
dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0);
 
INIT_LIST_HEAD(&dev->link);
 
407,7 → 407,7
void pci_scan_bus(u32_t bus)
{
u32_t devfn;
dev_t *dev;
pci_dev_t *dev;
 
 
for (devfn = 0; devfn < 0x100; devfn += 8)
/drivers/video/drm/radeon/r100.c
272,11 → 272,17
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
 
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today the callers are ib schedule and buffer move) */
/* We have to make sure that caches are flushed before
* CPU might read something from VRAM. */
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
343,9 → 349,15
 
r100_wb_disable(rdev);
if (rdev->wb.wb_obj) {
// radeon_object_kunmap(rdev->wb.wb_obj);
// radeon_object_unpin(rdev->wb.wb_obj);
// radeon_object_unref(&rdev->wb.wb_obj);
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0)) {
dev_err(rdev->dev, "(%d) can't finish WB\n", r);
return;
}
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
532,7 → 544,6
return err;
}
 
 
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
2814,6 → 2825,7
}
/* Enable IRQ */
// r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
// r = r100_cp_init(rdev, 1024 * 1024);
// if (r) {
/drivers/video/drm/radeon/r300.c
152,10 → 152,14
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
if (rdev->gart.table.vram.robj) {
// radeon_object_kunmap(rdev->gart.table.vram.robj);
// radeon_object_unpin(rdev->gart.table.vram.robj);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
}
 
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
508,11 → 512,14
 
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
 
tmp = RREG32(RADEON_MEM_CNTL);
if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
rdev->mc.vram_width = 128;
} else {
rdev->mc.vram_width = 64;
tmp &= R300_MEM_NUM_CHANNELS_MASK;
switch (tmp) {
case 0: rdev->mc.vram_width = 64; break;
case 1: rdev->mc.vram_width = 128; break;
case 2: rdev->mc.vram_width = 256; break;
default: rdev->mc.vram_width = 128; break;
}
 
r100_vram_init_sizes(rdev);
1355,7 → 1362,7
// if (r)
// return r;
/* Memory manager */
r = radeon_object_init(rdev);
r = radeon_bo_init(rdev);
if (r)
return r;
if (rdev->flags & RADEON_IS_PCIE) {
1382,7 → 1389,7
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
// radeon_irq_kms_fini(rdev);
// radeon_agp_fini(rdev);
rdev->accel_working = false;
}
return 0;
/drivers/video/drm/radeon/r420.c
348,7 → 348,7
if (r)
return r;
}
r300_set_reg_safe(rdev);
r420_set_reg_safe(rdev);
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
363,7 → 363,6
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
// radeon_agp_fini(rdev);
// radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
/drivers/video/drm/radeon/r600.c
1711,3 → 1711,18
return 0;
#endif
}
 
/**
* r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
* @rdev: radeon device structure
* @bo: buffer object struct which userspace is waiting for idle
*
* Some R6XX/R7XX chips don't seem to take into account the HDP flush
* performed through the ring buffer; this leads to corruption in rendering
* (see http://bugzilla.kernel.org/show_bug.cgi?id=15186). To avoid this we
* directly perform the HDP flush by writing the register through MMIO.
*/
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
/drivers/video/drm/radeon/radeon.h
281,6 → 281,7
struct ttm_bo_kmap_obj kmap;
unsigned pin_count;
void *kptr;
u32 cpu_addr;
u32 tiling_flags;
u32 pitch;
int surface_reg;
287,6 → 288,7
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object *gobj;
u32 domain;
};
 
struct radeon_bo_list {
697,6 → 699,13
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
/* ioctl hw specific callback. Some hw might want to perform a special
* operation on a specific ioctl. For instance, on wait idle some hw
* might want to perform an HDP flush through MMIO, as it seems that
* some R6XX/R7XX hw doesn't take the HDP flush into account if programmed
* through the ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
};
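Since the callback may be NULL, callers check it before invoking; a sketch of the wait-idle path ('robj' is a hypothetical radeon_bo pointer):

	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);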
 
/*
1149,6 → 1158,7
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
/drivers/video/drm/radeon/radeon_asic.h
117,6 → 117,7
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
 
176,6 → 177,7
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
/*
219,6 → 221,7
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
 
267,6 → 270,7
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
 
323,6 → 327,7
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
 
370,6 → 375,7
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
 
421,6 → 427,7
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
 
463,6 → 470,7
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
 
/*
504,6 → 512,7
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
 
static struct radeon_asic r600_asic = {
.init = &r600_init,
537,6 → 546,7
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
};
 
/*
580,6 → 590,7
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
};
 
#endif
/drivers/video/drm/radeon/radeon_atombios.c
114,6 → 114,7
i2c.i2c_id = gpio->sucI2cId.ucAccess;
 
i2c.valid = true;
break;
}
}
 
345,7 → 346,9
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DisplayPort
DRM_MODE_CONNECTOR_DisplayPort,
DRM_MODE_CONNECTOR_eDP,
DRM_MODE_CONNECTOR_Unknown
};
 
bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
745,8 → 748,7
else
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
(1 <<
i),
(1 << i),
dac),
(1 << i));
}
758,32 → 760,30
if (bios_connectors[j].valid && (i != j)) {
if (bios_connectors[i].line_mux ==
bios_connectors[j].line_mux) {
if (((bios_connectors[i].
devices &
(ATOM_DEVICE_DFP_SUPPORT))
&& (bios_connectors[j].
devices &
(ATOM_DEVICE_CRT_SUPPORT)))
||
((bios_connectors[j].
devices &
(ATOM_DEVICE_DFP_SUPPORT))
&& (bios_connectors[i].
devices &
(ATOM_DEVICE_CRT_SUPPORT)))) {
bios_connectors[i].
devices |=
bios_connectors[j].
devices;
bios_connectors[i].
connector_type =
/* make sure not to combine LVDS */
if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
bios_connectors[i].line_mux = 53;
bios_connectors[i].ddc_bus.valid = false;
continue;
}
if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
bios_connectors[j].line_mux = 53;
bios_connectors[j].ddc_bus.valid = false;
continue;
}
/* combine analog and digital for DVI-I */
if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
(bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
(bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
bios_connectors[i].devices |=
bios_connectors[j].devices;
bios_connectors[i].connector_type =
DRM_MODE_CONNECTOR_DVII;
if (bios_connectors[j].devices &
(ATOM_DEVICE_DFP_SUPPORT))
if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
bios_connectors[i].hpd =
bios_connectors[j].hpd;
bios_connectors[j].
valid = false;
bios_connectors[j].valid = false;
}
}
}
938,6 → 938,43
return false;
}
 
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
};
 
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
union igp_info *igp_info;
u8 frev, crev;
u16 data_offset;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);
 
igp_info = (union igp_info *)(mode_info->atom_context->bios +
data_offset);
 
if (igp_info) {
switch (crev) {
case 1:
if (igp_info->info.ucMemoryType & 0xf0)
return true;
break;
case 2:
if (igp_info->info_2.ucMemoryType & 0x0f)
return true;
break;
default:
DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
break;
}
}
return false;
}
 
bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
struct radeon_encoder_int_tmds *tmds)
{
1029,6 → 1066,7
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
break;
}
}
}
1234,6 → 1272,61
return true;
}
 
enum radeon_tv_std
radeon_atombios_get_tv_info(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
uint16_t data_offset;
uint8_t frev, crev;
struct _ATOM_ANALOG_TV_INFO *tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
 
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
 
tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
 
switch (tv_info->ucTV_BootUpDefaultStandard) {
case ATOM_TV_NTSC:
tv_std = TV_STD_NTSC;
DRM_INFO("Default TV standard: NTSC\n");
break;
case ATOM_TV_NTSCJ:
tv_std = TV_STD_NTSC_J;
DRM_INFO("Default TV standard: NTSC-J\n");
break;
case ATOM_TV_PAL:
tv_std = TV_STD_PAL;
DRM_INFO("Default TV standard: PAL\n");
break;
case ATOM_TV_PALM:
tv_std = TV_STD_PAL_M;
DRM_INFO("Default TV standard: PAL-M\n");
break;
case ATOM_TV_PALN:
tv_std = TV_STD_PAL_N;
DRM_INFO("Default TV standard: PAL-N\n");
break;
case ATOM_TV_PALCN:
tv_std = TV_STD_PAL_CN;
DRM_INFO("Default TV standard: PAL-CN\n");
break;
case ATOM_TV_PAL60:
tv_std = TV_STD_PAL_60;
DRM_INFO("Default TV standard: PAL-60\n");
break;
case ATOM_TV_SECAM:
tv_std = TV_STD_SECAM;
DRM_INFO("Default TV standard: SECAM\n");
break;
default:
tv_std = TV_STD_NTSC;
DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
break;
}
return tv_std;
}
 
struct radeon_encoder_tv_dac *
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
{
1269,6 → 1362,7
dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
 
tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
}
return tv_dac;
}
/drivers/video/drm/radeon/radeon_combios.c
595,6 → 595,48
return false;
}
 
bool radeon_combios_sideport_present(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
u16 igp_info;
 
igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
 
if (igp_info) {
if (RBIOS16(igp_info + 0x4))
return true;
}
return false;
}
 
static const uint32_t default_primarydac_adj[CHIP_LAST] = {
0x00000808, /* r100 */
0x00000808, /* rv100 */
0x00000808, /* rs100 */
0x00000808, /* rv200 */
0x00000808, /* rs200 */
0x00000808, /* r200 */
0x00000808, /* rv250 */
0x00000000, /* rs300 */
0x00000808, /* rv280 */
0x00000808, /* r300 */
0x00000808, /* r350 */
0x00000808, /* rv350 */
0x00000808, /* rv380 */
0x00000808, /* r420 */
0x00000808, /* r423 */
0x00000808, /* rv410 */
0x00000000, /* rs400 */
0x00000000, /* rs480 */
};
 
static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
struct radeon_encoder_primary_dac *p_dac)
{
p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
return;
}
 
struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
radeon_encoder
*encoder)
604,20 → 646,20
uint16_t dac_info;
uint8_t rev, bg, dac;
struct radeon_encoder_primary_dac *p_dac = NULL;
int found = 0;
 
if (rdev->bios == NULL)
p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
GFP_KERNEL);
 
if (!p_dac)
return NULL;
 
if (rdev->bios == NULL)
goto out;
 
/* check CRT table */
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
if (dac_info) {
p_dac =
kzalloc(sizeof(struct radeon_encoder_primary_dac),
GFP_KERNEL);
 
if (!p_dac)
return NULL;
 
rev = RBIOS8(dac_info) & 0x3;
if (rev < 2) {
bg = RBIOS8(dac_info + 0x2) & 0xf;
628,20 → 670,26
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
 
found = 1;
}
 
out:
if (!found) /* fallback to defaults */
radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
 
return p_dac;
}
 
static enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_encoder *encoder)
enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev)
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_device *dev = rdev->ddev;
uint16_t tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
 
if (rdev->bios == NULL)
return tv_std;
 
tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
779,7 → 827,7
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
found = 1;
}
tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
}
if (!found) {
/* then check CRT table */
923,8 → 971,7
lvds->native_mode.vdisplay);
 
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
lvds->panel_vcc_delay = 2000;
lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
 
lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
/drivers/video/drm/radeon/radeon_connectors.c
580,7 → 580,7
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
bool dret;
bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
 
encoder = radeon_best_single_encoder(connector);
587,9 → 587,11
if (!encoder)
ret = connector_status_disconnected;
 
if (radeon_connector->ddc_bus) {
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
740,11 → 742,13
struct drm_mode_object *obj;
int i;
enum drm_connector_status ret = connector_status_disconnected;
bool dret;
bool dret = false;
 
if (radeon_connector->ddc_bus) {
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
1343,7 → 1347,7
radeon_connector->dac_load_detect = false;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
radeon_connector->dac_load_detect);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
radeon_combios_get_tv_info(rdev));
/drivers/video/drm/radeon/radeon_device.c
46,15 → 46,19
int radeon_connector_table = 0;
int radeon_tv = 0;
int radeon_modeset = 1;
int radeon_new_pll = 1;
int radeon_vram_limit = 0;
int radeon_audio = 0;
 
void parse_cmdline(char *cmdline, mode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, mode_t *mode);
int init_display_kms(struct radeon_device *rdev, mode_t *mode);
 
int get_modes(mode_t *mode, int *count);
int set_user_mode(mode_t *mode);
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct radeon_device *rdev, videomode_t *mode);
 
int get_modes(videomode_t *mode, int *count);
int set_user_mode(videomode_t *mode);
 
 
/* Legacy VGA regions */
#define VGA_RSRC_NONE 0x00
#define VGA_RSRC_LEGACY_IO 0x01
71,16 → 75,11
*/
void radeon_surface_init(struct radeon_device *rdev)
{
ENTER();
 
/* FIXME: check this out */
if (rdev->family < CHIP_R600) {
int i;
 
for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
if (rdev->surface_regs[i].bo)
radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
else
radeon_clear_surface_reg(rdev, i);
}
/* enable surfaces */
421,6 → 420,12
/* FIXME: not supported yet */
return -EINVAL;
}
 
if (rdev->flags & RADEON_IS_IGP) {
rdev->asic->get_memory_clock = NULL;
rdev->asic->set_memory_clock = NULL;
}
 
return 0;
}
 
567,11 → 572,75
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
 
/*
* Radeon device.
*/
void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
switch (radeon_vram_limit) {
case 0:
case 4:
case 8:
case 16:
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
case 2048:
case 4096:
break;
default:
dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
radeon_vram_limit);
radeon_vram_limit = 0;
break;
}
radeon_vram_limit = radeon_vram_limit << 20;
/* gtt size must be a power of two and greater than or equal to 32M */
switch (radeon_gart_size) {
case 4:
case 8:
case 16:
dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
radeon_gart_size);
radeon_gart_size = 512;
break;
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
case 2048:
case 4096:
break;
default:
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
radeon_gart_size = 512;
break;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
/* AGP mode can only be -1, 1, 2, 4, 8 */
switch (radeon_agpmode) {
case -1:
case 0:
case 1:
case 2:
case 4:
case 8:
break;
default:
dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
radeon_agpmode = 0;
break;
}
}
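The switch statements above enumerate the accepted powers of two explicitly; an equivalent gart-size check (a sketch, assuming an is_power_of_2() helper is available in this port) would be:

	if (radeon_gart_size < 32 || !is_power_of_2(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) invalid, forcing to 512M\n",
			 radeon_gart_size);
		radeon_gart_size = 512;
	}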
 
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
600,9 → 669,9
 
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r) {
if (r)
return r;
}
radeon_check_arguments(rdev);
 
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
723,7 → 792,7
return 0;
}
 
mode_t usermode;
videomode_t usermode;
 
 
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
867,9 → 936,9
 
if( radeon_modeset &&
(outp != NULL) && (io->out_size == 4) &&
(io->inp_size == *outp * sizeof(mode_t)) )
(io->inp_size == *outp * sizeof(videomode_t)) )
{
retval = get_modes((mode_t*)inp, outp);
retval = get_modes((videomode_t*)inp, outp);
};
break;
 
879,9 → 948,9
 
if( radeon_modeset &&
(inp != NULL) &&
(io->inp_size == sizeof(mode_t)) )
(io->inp_size == sizeof(videomode_t)) )
{
retval = set_user_mode((mode_t*)inp);
retval = set_user_mode((videomode_t*)inp);
};
break;
};
890,7 → 959,7
}
 
static char log[256];
static dev_t device;
static pci_dev_t device;
 
u32_t drvEntry(int action, char *cmdline)
{
918,7 → 987,7
return 0;
};
}
dbgprintf("Radeon RC09 cmdline %s\n", cmdline);
dbgprintf("Radeon RC9 cmdline %s\n", cmdline);
 
enum_pci_devices();
 
/drivers/video/drm/radeon/radeon_display.c
234,7 → 234,7
"INTERNAL_UNIPHY2",
};
 
static const char *connector_names[13] = {
static const char *connector_names[15] = {
"Unknown",
"VGA",
"DVI-I",
248,8 → 248,20
"DisplayPort",
"HDMI-A",
"HDMI-B",
"TV",
"eDP",
};
 
static const char *hpd_names[7] = {
"NONE",
"HPD1",
"HPD2",
"HPD3",
"HPD4",
"HPD5",
"HPD6",
};
 
static void radeon_print_display_setup(struct drm_device *dev)
{
struct drm_connector *connector;
264,16 → 276,27
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
if (radeon_connector->ddc_bus)
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
radeon_connector->ddc_bus->rec.mask_clk_reg,
radeon_connector->ddc_bus->rec.mask_data_reg,
radeon_connector->ddc_bus->rec.a_clk_reg,
radeon_connector->ddc_bus->rec.a_data_reg,
radeon_connector->ddc_bus->rec.put_clk_reg,
radeon_connector->ddc_bus->rec.put_data_reg,
radeon_connector->ddc_bus->rec.get_clk_reg,
radeon_connector->ddc_bus->rec.get_data_reg);
radeon_connector->ddc_bus->rec.en_clk_reg,
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
} else {
if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
}
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
317,13 → 340,17
ret = radeon_get_atom_connector_info_from_object_table(dev);
else
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
} else
} else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
if (ret == false)
ret = radeon_get_legacy_connector_info_from_table(dev);
}
} else {
if (!ASIC_IS_AVIVO(rdev))
ret = radeon_get_legacy_connector_info_from_table(dev);
}
if (ret) {
radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
radeon_ddc_dump(drm_connector);
336,12 → 363,19
{
int ret = 0;
 
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
}
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
radeon_i2c_do_lock(radeon_connector, 1);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
radeon_i2c_do_lock(radeon_connector, 0);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
 
if (radeon_connector->edid) {
361,9 → 395,9
 
if (!radeon_connector->ddc_bus)
return -1;
radeon_i2c_do_lock(radeon_connector, 1);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
radeon_i2c_do_lock(radeon_connector, 0);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (edid) {
kfree(edid);
}
386,11 → 420,12
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p,
int flags)
uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
uint32_t min_post_div = pll->min_post_div;
uint32_t max_post_div = pll->max_post_div;
uint32_t min_fractional_feed_div = 0;
uint32_t max_fractional_feed_div = 0;
uint32_t best_vco = pll->best_vco;
406,7 → 441,7
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
 
if (flags & RADEON_PLL_USE_REF_DIV)
if (pll->flags & RADEON_PLL_USE_REF_DIV)
min_ref_div = max_ref_div = pll->reference_div;
else {
while (min_ref_div < max_ref_div-1) {
421,19 → 456,22
}
}
 
if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
if (pll->flags & RADEON_PLL_USE_POST_DIV)
min_post_div = max_post_div = pll->post_div;
 
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
min_fractional_feed_div = pll->min_frac_feedback_div;
max_fractional_feed_div = pll->max_frac_feedback_div;
}
 
for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
uint32_t ref_div;
 
if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
continue;
 
/* legacy radeons only have a few post_divs */
if (flags & RADEON_PLL_LEGACY) {
if (pll->flags & RADEON_PLL_LEGACY) {
if ((post_div == 5) ||
(post_div == 7) ||
(post_div == 9) ||
480,7 → 518,7
tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
current_freq = radeon_div(tmp, ref_div * post_div);
 
if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
error = freq - current_freq;
error = error < 0 ? 0xffffffff : error;
} else
507,12 → 545,12
best_freq = current_freq;
best_error = error;
best_vco_diff = vco_diff;
} else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
best_post_div = post_div;
best_ref_div = ref_div;
best_feedback_div = feedback_div;
542,6 → 580,97
*post_div_p = best_post_div;
}
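 
As a usage sketch (not part of this revision): with the flags argument dropped, call sites now configure pll->flags before invoking the helper, exactly as the radeon_legacy_crtc.c hunk later in this revision does. Here pll and mode stand for whatever the surrounding driver code provides.
 
uint32_t dot_clock, fb_div, frac_fb_div, ref_div, post_div;
 
pll->flags = RADEON_PLL_LEGACY;
if (mode->clock > 200000)
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
radeon_compute_pll(pll, mode->clock, &dot_clock,
&fb_div, &frac_fb_div, &ref_div, &post_div);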
 
void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p)
{
fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
fixed20_12 pll_out_max, pll_out_min;
fixed20_12 pll_in_max, pll_in_min;
fixed20_12 reference_freq;
fixed20_12 error, ffreq, a, b;
 
pll_out_max.full = rfixed_const(pll->pll_out_max);
pll_out_min.full = rfixed_const(pll->pll_out_min);
pll_in_max.full = rfixed_const(pll->pll_in_max);
pll_in_min.full = rfixed_const(pll->pll_in_min);
reference_freq.full = rfixed_const(pll->reference_freq);
do_div(freq, 10);
ffreq.full = rfixed_const(freq);
error.full = rfixed_const(100 * 100);
 
/* max p */
p.full = rfixed_div(pll_out_max, ffreq);
p.full = rfixed_floor(p);
 
/* min m */
m.full = rfixed_div(reference_freq, pll_in_max);
m.full = rfixed_ceil(m);
 
while (1) {
n.full = rfixed_div(ffreq, reference_freq);
n.full = rfixed_mul(n, m);
n.full = rfixed_mul(n, p);
 
f_vco.full = rfixed_div(n, m);
f_vco.full = rfixed_mul(f_vco, reference_freq);
 
f_pclk.full = rfixed_div(f_vco, p);
 
if (f_pclk.full > ffreq.full)
error.full = f_pclk.full - ffreq.full;
else
error.full = ffreq.full - f_pclk.full;
error.full = rfixed_div(error, f_pclk);
a.full = rfixed_const(100 * 100);
error.full = rfixed_mul(error, a);
 
a.full = rfixed_mul(m, p);
a.full = rfixed_div(n, a);
best_freq.full = rfixed_mul(reference_freq, a);
 
if (rfixed_trunc(error) < 25)
break;
 
a.full = rfixed_const(1);
m.full = m.full + a.full;
a.full = rfixed_div(reference_freq, m);
if (a.full >= pll_in_min.full)
continue;
 
m.full = rfixed_div(reference_freq, pll_in_max);
m.full = rfixed_ceil(m);
a.full = rfixed_const(1);
p.full = p.full - a.full;
a.full = rfixed_mul(p, ffreq);
if (a.full >= pll_out_min.full)
continue;
else {
DRM_ERROR("Unable to find pll dividers\n");
break;
}
}
 
a.full = rfixed_const(10);
b.full = rfixed_mul(n, a);
 
frac_n.full = rfixed_floor(n);
frac_n.full = rfixed_mul(frac_n, a);
frac_n.full = b.full - frac_n.full;
 
*dot_clock_p = rfixed_trunc(best_freq);
*fb_div_p = rfixed_trunc(n);
*frac_fb_div_p = rfixed_trunc(frac_n);
*ref_div_p = rfixed_trunc(m);
*post_div_p = rfixed_trunc(p);
 
DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
}
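 
A hedged note on the arithmetic above, assuming the usual radeon 20.12 fixed-point helpers (rfixed_const(x) stores x << 12): error is scaled by 100 * 100, i.e. tracked in hundredths of a percent, so the rfixed_trunc(error) < 25 exit stops the search once the computed pixel clock is within 0.25% of the request. A tiny illustration of the format:
 
fixed20_12 half;
half.full = rfixed_const(1) / 2; /* 0.5 in 20.12: full == 2048 */
/* rfixed_trunc(half) == 0; rfixed_ceil(half) == 4096, i.e. 1.0 when assigned back to .full */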
 
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
632,7 → 761,7
{ TV_STD_SECAM, "secam" },
};
 
int radeon_modeset_create_props(struct radeon_device *rdev)
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int i, sz;
 
645,7 → 774,7
return -ENOMEM;
 
rdev->mode_info.coherent_mode_property->values[0] = 0;
rdev->mode_info.coherent_mode_property->values[0] = 1;
rdev->mode_info.coherent_mode_property->values[1] = 1;
}
 
if (!ASIC_IS_AVIVO(rdev)) {
669,7 → 798,7
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
rdev->mode_info.load_detect_property->values[0] = 0;
rdev->mode_info.load_detect_property->values[0] = 1;
rdev->mode_info.load_detect_property->values[1] = 1;
 
drm_mode_create_scaling_mode_property(rdev->ddev);
 
726,6 → 855,8
if (!ret) {
return ret;
}
/* initialize hpd */
radeon_hpd_init(rdev);
drm_helper_initial_config(rdev->ddev);
return 0;
}
733,6 → 864,7
void radeon_modeset_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.mode_config_initialized) {
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
753,7 → 885,15
if (encoder->crtc != crtc)
continue;
if (first) {
/* set scaling */
if (radeon_encoder->rmx_type == RMX_OFF)
radeon_crtc->rmx_type = RMX_OFF;
else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
mode->vdisplay < radeon_encoder->native_mode.vdisplay)
radeon_crtc->rmx_type = radeon_encoder->rmx_type;
else
radeon_crtc->rmx_type = RMX_OFF;
/* copy native mode */
memcpy(&radeon_crtc->native_mode,
&radeon_encoder->native_mode,
sizeof(struct drm_display_mode));
/drivers/video/drm/radeon/radeon_fb.c
146,8 → 146,8
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_object *robj = NULL;
void *device = NULL; //&rdev->pdev->dev;
struct radeon_bo *rbo = NULL;
// struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
u64 fb_gpuaddr;
void *fbptr = NULL;
163,7 → 163,7
if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
surface_bpp = 32;
 
mode_cmd.bpp = 32;
mode_cmd.bpp = surface_bpp;
/* need to align pitch with crtc limits */
mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
mode_cmd.depth = surface_depth;
171,10 → 171,10
size = mode_cmd.pitch * mode_cmd.height;
aligned_size = ALIGN(size, PAGE_SIZE);
 
ret = radeon_gem_fb_object_create(rdev, aligned_size, 0,
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, 0,
false, &gobj);
false, ttm_bo_type_kernel,
&gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
surface_width, surface_height);
181,8 → 181,30
ret = -ENOMEM;
goto out;
}
robj = gobj->driver_private;
rbo = gobj->driver_private;
 
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
 
#ifdef __BIG_ENDIAN
switch (mode_cmd.bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
case 16:
tiling_flags |= RADEON_TILING_SWAP_16BIT;
default:
break;
}
#endif
 
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
tiling_flags | RADEON_TILING_SURFACE,
mode_cmd.pitch);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
mutex_lock(&rdev->ddev->struct_mutex);
fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
if (fb == NULL) {
190,12 → 212,21
ret = -ENOMEM;
goto out_unref;
}
ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
if (ret) {
printk(KERN_ERR "failed to pin framebuffer\n");
ret = -ENOMEM;
radeon_bo_unreserve(rbo);
goto out_unref;
}
if (fb_tiled)
radeon_bo_check_tiling(rbo, 0, 0);
ret = radeon_bo_kmap(rbo, &fbptr);
radeon_bo_unreserve(rbo);
if (ret) {
goto out_unref;
}
 
list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
 
202,9 → 233,9
*fb_p = fb;
rfb = to_radeon_framebuffer(fb);
rdev->fbdev_rfb = rfb;
rdev->fbdev_robj = robj;
rdev->fbdev_rbo = rbo;
 
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
info = framebuffer_alloc(sizeof(struct radeon_fb_device), NULL);
if (info == NULL) {
ret = -ENOMEM;
goto out_unref;
223,14 → 254,7
if (ret)
goto out_unref;
 
// ret = radeon_object_kmap(robj, &fbptr);
// if (ret) {
// goto out_unref;
// }
 
 
fbptr = (void*)0xFE000000; // LFB_BASE
 
strcpy(info->fix.id, "radeondrmfb");
 
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
277,9 → 301,13
return 0;
 
out_unref:
if (robj) {
// radeon_object_kunmap(robj);
if (rbo) {
ret = radeon_bo_reserve(rbo, false);
if (likely(ret == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unreserve(rbo);
}
}
if (fb && ret) {
list_del(&fb->filp_head);
// drm_gem_object_unreference(gobj);
294,6 → 322,13
 
int radeonfb_probe(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
int bpp_sel = 32;
 
/* select 8 bpp console on RN50 or cards with 32MB VRAM or less */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
 
return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
}
 
301,7 → 336,8
{
struct fb_info *info;
struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
struct radeon_object *robj;
struct radeon_bo *rbo;
int r;
 
if (!fb) {
return -EINVAL;
309,12 → 345,17
info = fb->fbdev;
if (info) {
struct radeon_fb_device *rfbdev = info->par;
robj = rfb->obj->driver_private;
rbo = rfb->obj->driver_private;
// unregister_framebuffer(info);
// radeon_object_kunmap(robj);
// radeon_object_unpin(robj);
// framebuffer_release(info);
r = radeon_bo_reserve(rbo, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
drm_fb_helper_free(&rfbdev->helper);
framebuffer_release(info);
}
 
printk(KERN_INFO "unregistered panic notifier\n");
 
323,120 → 364,4
EXPORT_SYMBOL(radeonfb_remove);
 
 
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
 
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 
obj->dev = dev;
// obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
// if (IS_ERR(obj->filp)) {
// kfree(obj);
// return NULL;
// }
 
// kref_init(&obj->refcount);
// kref_init(&obj->handlecount);
obj->size = size;
 
// if (dev->driver->gem_init_object != NULL &&
// dev->driver->gem_init_object(obj) != 0) {
// fput(obj->filp);
// kfree(obj);
// return NULL;
// }
// atomic_inc(&dev->object_count);
// atomic_add(obj->size, &dev->object_memory);
return obj;
}
 
 
int radeon_gem_fb_object_create(struct radeon_device *rdev, int size,
int alignment, int initial_domain,
bool discardable, bool kernel,
bool interruptible,
struct drm_gem_object **obj)
{
struct drm_gem_object *gobj;
struct radeon_object *robj;
 
*obj = NULL;
gobj = drm_gem_object_alloc(rdev->ddev, size);
if (!gobj) {
return -ENOMEM;
}
/* At least align on page size */
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
 
robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
if (!robj) {
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
size, initial_domain, alignment);
// mutex_lock(&rdev->ddev->struct_mutex);
// drm_gem_object_unreference(gobj);
// mutex_unlock(&rdev->ddev->struct_mutex);
return -ENOMEM;
}
robj->rdev = rdev;
robj->gobj = gobj;
INIT_LIST_HEAD(&robj->list);
 
robj->flags = TTM_PL_FLAG_VRAM;
 
struct drm_mm_node *vm_node;
 
vm_node = kzalloc(sizeof(*vm_node),0);
 
vm_node->free = 0;
vm_node->size = 0xC00000 >> 12;
vm_node->start = 0;
vm_node->mm = NULL;
 
robj->mm_node = vm_node;
 
robj->vm_addr = ((uint32_t)robj->mm_node->start);
 
gobj->driver_private = robj;
*obj = gobj;
return 0;
}
 
 
struct fb_info *framebuffer_alloc(size_t size, void *dev)
{
#define BYTES_PER_LONG (BITS_PER_LONG/8)
#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
int fb_info_size = sizeof(struct fb_info);
struct fb_info *info;
char *p;
 
if (size)
fb_info_size += PADDING;
 
p = kzalloc(fb_info_size + size, GFP_KERNEL);
 
if (!p)
return NULL;
 
info = (struct fb_info *) p;
 
if (size)
info->par = p + fb_info_size;
 
return info;
#undef PADDING
#undef BYTES_PER_LONG
}
 
 
 
 
/drivers/video/drm/radeon/radeon_fence.c
140,16 → 140,15
 
bool radeon_fence_signaled(struct radeon_fence *fence)
{
struct radeon_device *rdev = fence->rdev;
unsigned long irq_flags;
bool signaled = false;
 
if (rdev->gpu_lockup) {
if (!fence)
return true;
}
if (fence == NULL) {
 
if (fence->rdev->gpu_lockup)
return true;
}
 
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
signaled = fence->signaled;
/* if we are shutting down, report all fences as signaled */
324,7 → 323,7
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
if (r) {
DRM_ERROR("Fence failed to get a scratch register.");
dev_err(rdev->dev, "fence failed to get scratch register\n");
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return r;
}
335,9 → 334,10
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for fence !\n");
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
return 0;
}
346,11 → 346,13
{
unsigned long irq_flags;
 
if (!rdev->fence_drv.initialized)
return;
wake_up_all(&rdev->fence_drv.queue);
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
DRM_INFO("radeon: fence finalized\n");
rdev->fence_drv.initialized = false;
}
 
 
/drivers/video/drm/radeon/radeon_gart.c
78,11 → 78,9
int r;
 
if (rdev->gart.table.vram.robj == NULL) {
r = radeon_object_create(rdev, NULL,
rdev->gart.table_size,
true,
RADEON_GEM_DOMAIN_VRAM,
false, &rdev->gart.table.vram.robj);
r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
true, RADEON_GEM_DOMAIN_VRAM,
&rdev->gart.table.vram.robj);
if (r) {
return r;
}
95,48 → 93,42
uint64_t gpu_addr;
int r;
 
r = radeon_object_pin(rdev->gart.table.vram.robj,
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->gart.table.vram.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
// radeon_object_unref(&rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
return r;
}
r = radeon_object_kmap(rdev->gart.table.vram.robj,
r = radeon_bo_kmap(rdev->gart.table.vram.robj,
(void **)&rdev->gart.table.vram.ptr);
if (r) {
// radeon_object_unpin(rdev->gart.table.vram.robj);
// radeon_object_unref(&rdev->gart.table.vram.robj);
DRM_ERROR("radeon: failed to map gart vram table.\n");
if (r)
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
rdev->gart.table_addr = gpu_addr;
return r;
}
 
rdev->gart.table_addr = gpu_addr;
 
dbgprintf("alloc gart vram: gpu_base %x lin_addr %x\n",
rdev->gart.table_addr, rdev->gart.table.vram.ptr);
 
// gpu_addr = 0x800000;
 
// u32_t pci_addr = rdev->mc.aper_base + gpu_addr;
 
// rdev->gart.table.vram.ptr = (void*)MapIoMem(pci_addr, rdev->gart.table_size, PG_SW);
 
 
// dbgprintf("alloc gart vram:\n gpu_base %x pci_base %x lin_addr %x",
// gpu_addr, pci_addr, rdev->gart.table.vram.ptr);
 
return 0;
}
 
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
int r;
 
if (rdev->gart.table.vram.robj == NULL) {
return;
}
// radeon_object_kunmap(rdev->gart.table.vram.robj);
// radeon_object_unpin(rdev->gart.table.vram.robj);
// radeon_object_unref(&rdev->gart.table.vram.robj);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
radeon_bo_unref(&rdev->gart.table.vram.robj);
}
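 
The reserve/pin/kmap ladder used here recurs at every radeon_bo call site in this revision (framebuffer, ring buffer, IB pool). Condensed into one hypothetical helper (not in the source), the pattern looks like this:
 
static int radeon_bo_pin_and_map(struct radeon_bo *bo, u32 domain,
u64 *gpu_addr, void **cpu_ptr)
{
int r;
 
r = radeon_bo_reserve(bo, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(bo, domain, gpu_addr);
if (r) {
radeon_bo_unreserve(bo);
return r;
}
r = radeon_bo_kmap(bo, cpu_ptr);
if (r)
radeon_bo_unpin(bo);
radeon_bo_unreserve(bo); /* the pin, not the reservation, keeps the buffer resident */
return r;
}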
 
 
 
152,7 → 144,7
int i, j;
 
if (!rdev->gart.ready) {
// WARN(1, "trying to unbind memory from uninitialized GART !\n");
WARN(1, "trying to unbind memory from uninitialized GART !\n");
return;
}
t = offset / RADEON_GPU_PAGE_SIZE;
234,13 → 226,13
rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
GFP_KERNEL);
if (rdev->gart.pages == NULL) {
// radeon_gart_fini(rdev);
radeon_gart_fini(rdev);
return -ENOMEM;
}
rdev->gart.pages_addr = kzalloc(sizeof(u32_t) *
rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
rdev->gart.num_cpu_pages, GFP_KERNEL);
if (rdev->gart.pages_addr == NULL) {
// radeon_gart_fini(rdev);
radeon_gart_fini(rdev);
return -ENOMEM;
}
return 0;
/drivers/video/drm/radeon/radeon_gem.c
30,31 → 30,6
#include "radeon_drm.h"
#include "radeon.h"
 
 
#define TTM_PL_SYSTEM 0
#define TTM_PL_TT 1
#define TTM_PL_VRAM 2
#define TTM_PL_PRIV0 3
#define TTM_PL_PRIV1 4
#define TTM_PL_PRIV2 5
#define TTM_PL_PRIV3 6
#define TTM_PL_PRIV4 7
#define TTM_PL_PRIV5 8
#define TTM_PL_SWAPPED 15
 
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM 0x0000FFFF
 
 
int radeon_gem_object_init(struct drm_gem_object *obj)
{
/* we do nothing here */
63,11 → 38,11
 
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_object *robj = gobj->driver_private;
struct radeon_bo *robj = gobj->driver_private;
 
gobj->driver_private = NULL;
if (robj) {
// radeon_object_unref(&robj);
radeon_bo_unref(&robj);
}
}
 
74,11 → 49,10
int radeon_gem_object_create(struct radeon_device *rdev, int size,
int alignment, int initial_domain,
bool discardable, bool kernel,
bool interruptible,
struct drm_gem_object **obj)
{
struct drm_gem_object *gobj;
struct radeon_object *robj;
struct radeon_bo *robj;
int r;
 
*obj = NULL;
90,14 → 64,10
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
interruptible, &robj);
r = radeon_fb_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
if (r) {
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
size, initial_domain, alignment);
// mutex_lock(&rdev->ddev->struct_mutex);
// drm_gem_object_unreference(gobj);
// mutex_unlock(&rdev->ddev->struct_mutex);
return r;
}
gobj->driver_private = robj;
108,33 → 78,33
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
struct radeon_object *robj = obj->driver_private;
uint32_t flags;
struct radeon_bo *robj = obj->driver_private;
int r;
 
switch (pin_domain) {
case RADEON_GEM_DOMAIN_VRAM:
flags = TTM_PL_FLAG_VRAM;
break;
case RADEON_GEM_DOMAIN_GTT:
flags = TTM_PL_FLAG_TT;
break;
default:
flags = TTM_PL_FLAG_SYSTEM;
break;
r = radeon_bo_reserve(robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(robj, pin_domain, gpu_addr);
radeon_bo_unreserve(robj);
return r;
}
return radeon_object_pin(robj, flags, gpu_addr);
}
 
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
struct radeon_object *robj = obj->driver_private;
// radeon_object_unpin(robj);
struct radeon_bo *robj = obj->driver_private;
int r;
 
r = radeon_bo_reserve(robj, false);
if (likely(r == 0)) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
}
 
int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
struct radeon_object *robj;
struct radeon_bo *robj;
uint32_t domain;
int r;
 
152,12 → 122,12
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
// r = radeon_object_wait(robj);
if (r) {
printk(KERN_ERR "Failed to wait for object !\n");
return r;
// r = radeon_bo_wait(robj, NULL, false);
// if (r) {
// printk(KERN_ERR "Failed to wait for object !\n");
// return r;
// }
}
}
return 0;
}
 
218,7 → 188,7
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
args->initial_domain, false,
false, true, &gobj);
false, &gobj);
if (r) {
return r;
}
243,7 → 213,7
* just validate the BO into a certain domain */
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
struct radeon_object *robj;
struct radeon_bo *robj;
int r;
 
/* for now if someone requests domain CPU -
269,8 → 239,7
{
struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
struct radeon_object *robj;
int r;
struct radeon_bo *robj;
 
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
277,19 → 246,45
return -EINVAL;
}
robj = gobj->driver_private;
r = radeon_object_mmap(robj, &args->addr_ptr);
args->addr_ptr = radeon_bo_mmap_offset(robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
return r;
return 0;
}
 
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
/* FIXME: implement */
return 0;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
uint32_t cur_placement = 0;
 
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
args->domain = RADEON_GEM_DOMAIN_VRAM;
break;
case TTM_PL_TT:
args->domain = RADEON_GEM_DOMAIN_GTT;
break;
case TTM_PL_SYSTEM:
args->domain = RADEON_GEM_DOMAIN_CPU;
default:
break;
}
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
return r;
}
 
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
296,7 → 291,7
{
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_object *robj;
struct radeon_bo *robj;
int r;
 
gobj = drm_gem_object_lookup(dev, filp, args->handle);
304,7 → 299,10
return -EINVAL;
}
robj = gobj->driver_private;
r = radeon_object_wait(robj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
311,4 → 309,24
return r;
}
 
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_set_tiling *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r = 0;
 
DRM_DEBUG("%d \n", args->handle);
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -EINVAL;
robj = gobj->driver_private;
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
return r;
}
 
#endif
/drivers/video/drm/radeon/radeon_i2c.c
216,7 → 216,7
return NULL;
 
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
// i2c->adapter.owner = THIS_MODULE;
i2c->dev = dev;
i2c->adapter.algo_data = &i2c->algo.dp;
i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
/drivers/video/drm/radeon/radeon_legacy_crtc.c
43,8 → 43,7
}
 
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
322,13 → 321,11
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
}
// drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
// drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
340,69 → 337,6
}
}
 
/* properly set crtc bpp when using atombios */
void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
int format;
uint32_t crtc_gen_cntl;
uint32_t disp_merge_cntl;
uint32_t crtc_pitch;
 
switch (crtc->fb->bits_per_pixel) {
case 8:
format = 2;
break;
case 15: /* 555 */
format = 3;
break;
case 16: /* 565 */
format = 4;
break;
case 24: /* RGB */
format = 5;
break;
case 32: /* xRGB */
format = 6;
break;
default:
return;
}
 
crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
((crtc->fb->bits_per_pixel * 8) - 1)) /
(crtc->fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
 
WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
 
switch (radeon_crtc->crtc_id) {
case 0:
disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
 
crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
crtc_gen_cntl |= (format << 8);
crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
break;
case 1:
disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
 
crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
crtc_gen_cntl |= (format << 8);
WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
break;
}
}
 
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
756,7 → 690,6
uint32_t post_divider = 0;
uint32_t freq = 0;
uint8_t pll_gain;
int pll_flags = RADEON_PLL_LEGACY;
bool use_bios_divs = false;
/* PLL registers */
uint32_t pll_ref_div = 0;
790,10 → 723,12
else
pll = &rdev->clock.p1pll;
 
pll->flags = RADEON_PLL_LEGACY;
 
if (mode->clock > 200000) /* range limits??? */
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
805,7 → 740,7
}
 
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
if (!rdev->is_atom_bios) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
820,7 → 755,7
}
}
}
pll_flags |= RADEON_PLL_USE_REF_DIV;
pll->flags |= RADEON_PLL_USE_REF_DIV;
}
}
}
830,8 → 765,7
if (!use_bios_divs) {
radeon_compute_pll(pll, mode->clock,
&freq, &feedback_div, &frac_fb_div,
&reference_div, &post_divider,
pll_flags);
&reference_div, &post_divider);
 
for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
if (post_div->divider == post_divider)
1059,7 → 993,7
radeon_set_pll(crtc, adjusted_mode);
radeon_overscan_setup(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0) {
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
} else {
if (radeon_crtc->rmx_type != RMX_OFF) {
/* FIXME: only first crtc has rmx what should we
/drivers/video/drm/radeon/radeon_legacy_tv.c
77,7 → 77,7
unsigned pix_to_tv;
};
 
static const uint16_t hor_timing_NTSC[] = {
static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
0x0007,
0x003f,
0x0263,
98,7 → 98,7
0
};
 
static const uint16_t vert_timing_NTSC[] = {
static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
0x2001,
0x200d,
0x1006,
115,7 → 115,7
0
};
 
static const uint16_t hor_timing_PAL[] = {
static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
0x0007,
0x0058,
0x027c,
136,7 → 136,7
0
};
 
static const uint16_t vert_timing_PAL[] = {
static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
0x2001,
0x200c,
0x1005,
623,9 → 623,9
}
flicker_removal = (tmp + 500) / 1000;
 
if (flicker_removal < 3)
flicker_removal = 3;
for (i = 0; i < 6; ++i) {
if (flicker_removal < 2)
flicker_removal = 2;
for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
if (flicker_removal == SLOPE_limit[i])
break;
}
/drivers/video/drm/radeon/radeon_mode.h
46,32 → 46,6
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
 
enum radeon_connector_type {
CONNECTOR_NONE,
CONNECTOR_VGA,
CONNECTOR_DVI_I,
CONNECTOR_DVI_D,
CONNECTOR_DVI_A,
CONNECTOR_STV,
CONNECTOR_CTV,
CONNECTOR_LVDS,
CONNECTOR_DIGITAL,
CONNECTOR_SCART,
CONNECTOR_HDMI_TYPE_A,
CONNECTOR_HDMI_TYPE_B,
CONNECTOR_0XC,
CONNECTOR_0XD,
CONNECTOR_DIN,
CONNECTOR_DISPLAY_PORT,
CONNECTOR_UNSUPPORTED
};
 
enum radeon_dvi_type {
DVI_AUTO,
DVI_DIGITAL,
DVI_ANALOG
};
 
enum radeon_rmx_type {
RMX_OFF,
RMX_FULL,
88,6 → 62,7
TV_STD_SCART_PAL,
TV_STD_SECAM,
TV_STD_PAL_CN,
TV_STD_PAL_N,
};
 
/* radeon gpio-based i2c
150,16 → 125,24
#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define RADEON_PLL_USE_POST_DIV (1 << 12)
 
struct radeon_pll {
uint16_t reference_freq;
uint16_t reference_div;
/* reference frequency */
uint32_t reference_freq;
 
/* fixed dividers */
uint32_t reference_div;
uint32_t post_div;
 
/* pll in/out limits */
uint32_t pll_in_min;
uint32_t pll_in_max;
uint32_t pll_out_min;
uint32_t pll_out_max;
uint16_t xclk;
uint32_t best_vco;
 
/* divider limits */
uint32_t min_ref_div;
uint32_t max_ref_div;
uint32_t min_post_div;
168,7 → 151,12
uint32_t max_feedback_div;
uint32_t min_frac_feedback_div;
uint32_t max_frac_feedback_div;
uint32_t best_vco;
 
/* flags for the current clock */
uint32_t flags;
 
/* pll id */
uint32_t id;
};
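 
The new RADEON_PLL_USE_POST_DIV flag pairs with the post_div field above. A hedged two-line sketch of a caller that needs a fixed post divider (radeon_compute_pll then clamps min_post_div and max_post_div to it):
 
pll->post_div = 2; /* illustrative value */
pll->flags |= RADEON_PLL_USE_POST_DIV;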
 
struct radeon_i2c_chan {
311,7 → 299,7
struct radeon_encoder_atom_dig {
/* atom dig */
bool coherent_mode;
int dig_block;
int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
334,6 → 322,9
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
int hdmi_offset;
int hdmi_audio_workaround;
int hdmi_buffer_status;
};
 
struct radeon_connector_atom_dig {
392,6 → 383,11
struct drm_gem_object *obj;
};
 
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
radeon_atombios_get_tv_info(struct radeon_device *rdev);
 
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
434,8 → 430,7
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p,
int flags);
uint32_t *post_div_p);
 
extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint64_t freq,
443,8 → 438,7
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p,
int flags);
uint32_t *post_div_p);
 
extern void radeon_setup_encoder_clones(struct drm_device *dev);
 
470,7 → 464,6
 
extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
 
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
/drivers/video/drm/radeon/radeon_object.h
59,24 → 59,10
*
* Returns:
* -EBUSY: buffer is busy and @no_wait is true
* -ERESTART: A wait for the buffer to become unreserved was interrupted by
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
int r;
 
retry:
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
if (r == -ERESTART)
goto retry;
dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
}
 
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
ttm_bo_unreserve(&bo->tbo);
125,11 → 111,9
{
int r;
 
retry:
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
if (r == -ERESTART)
goto retry;
if (r != -ERESTARTSYS)
dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
return r;
}
140,8 → 124,6
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
spin_unlock(&bo->tbo.lock);
ttm_bo_unreserve(&bo->tbo);
if (unlikely(r == -ERESTART))
goto retry;
return r;
}
 
/drivers/video/drm/radeon/radeon_object_kos.c
0,0 → 1,388
 
#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
 
 
static struct drm_mm mm_gtt;
static struct drm_mm mm_vram;
 
int drm_mm_alloc(struct drm_mm *mm, size_t num_pages,
struct drm_mm_node **node)
{
struct drm_mm_node *vm_node;
int r;
 
retry_pre_get:
 
r = drm_mm_pre_get(mm);
 
if (unlikely(r != 0))
return r;
 
vm_node = drm_mm_search_free(mm, num_pages, 0, 0);
 
if (unlikely(vm_node == NULL)) {
r = -ENOMEM;
return r;
}
 
*node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
 
if (unlikely(*node == NULL)) {
goto retry_pre_get;
}
 
return 0;
};
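 
A minimal usage sketch for this helper, assuming mm_vram has been initialized by radeon_bo_init() below; drm_mm_put_block() is the matching release:
 
struct drm_mm_node *node;
int r;
 
r = drm_mm_alloc(&mm_vram, 16, &node); /* 16 pages (64 KiB with 4 KiB pages) */
if (r == 0) {
u64 offset = (u64)node->start << PAGE_SHIFT; /* node->start is in pages */
/* program the hardware with offset, then release: */
drm_mm_put_block(node);
}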
 
 
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
u32 c = 0;
 
rbo->placement.fpfn = 0;
rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (domain & RADEON_GEM_DOMAIN_GTT)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
if (domain & RADEON_GEM_DOMAIN_CPU)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
if (!c)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
}
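 
Read as an example (a direct reading of the function above, not extra behavior): a buffer allowed in both domains gets two ordered placements, letting TTM fall back from VRAM to GTT under pressure:
 
/* given an allocated struct radeon_bo *rbo: */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT);
/* rbo->placements[0] == TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM */
/* rbo->placements[1] == TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT */
/* rbo->placement.num_placement == 2: VRAM preferred, GTT as fallback */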
 
 
int radeon_bo_init(struct radeon_device *rdev)
{
int r;
 
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
rdev->mc.mc_vram_size >> 20,
(unsigned long long)rdev->mc.aper_size >> 20);
DRM_INFO("RAM width %dbits %cDR\n",
rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
 
r = drm_mm_init(&mm_vram, 0xC00000 >> PAGE_SHIFT,
((rdev->mc.real_vram_size - 0xC00000) >> PAGE_SHIFT));
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
};
 
r = drm_mm_init(&mm_gtt, 0, rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
}
 
return 0;
}
 
 
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
int r;
 
bo->tbo.reserved.counter = 1;
 
return 0;
}
 
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
bo->reserved.counter = 1;
}
 
int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
unsigned long size, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
enum ttm_bo_type type;
 
struct radeon_bo *bo;
size_t num_pages;
struct drm_mm *mman;
u32 bo_domain;
int r;
 
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
if (num_pages == 0) {
dbgprintf("Illegal buffer object size.\n");
return -EINVAL;
}
 
if(domain & RADEON_GEM_DOMAIN_VRAM)
{
mman = &mm_vram;
bo_domain = RADEON_GEM_DOMAIN_VRAM;
}
else if(domain & RADEON_GEM_DOMAIN_GTT)
{
mman = &mm_gtt;
bo_domain = RADEON_GEM_DOMAIN_GTT;
}
else return -EINVAL;
 
if (kernel) {
type = ttm_bo_type_kernel;
} else {
type = ttm_bo_type_device;
}
*bo_ptr = NULL;
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
 
bo->rdev = rdev;
bo->gobj = gobj;
bo->surface_reg = -1;
bo->tbo.num_pages = num_pages;
bo->domain = domain;
 
INIT_LIST_HEAD(&bo->list);
 
// radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocations are uninterruptible */
 
r = drm_mm_alloc(mman, num_pages, &bo->tbo.vm_node);
if (unlikely(r != 0))
return r;
 
*bo_ptr = bo;
 
return 0;
}
 
#define page_tabs 0xFDC00000 /* just another hack */
 
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
int r=0, i;
 
if (bo->pin_count) {
bo->pin_count++;
if (gpu_addr)
*gpu_addr = radeon_bo_gpu_offset(bo);
return 0;
}
 
bo->tbo.offset = bo->tbo.vm_node->start << PAGE_SHIFT;
 
if(bo->domain & RADEON_GEM_DOMAIN_VRAM)
{
bo->tbo.offset += (u64)bo->rdev->mc.vram_location;
}
else if (bo->domain & RADEON_GEM_DOMAIN_GTT)
{
u32_t *pagelist;
bo->kptr = KernelAlloc( bo->tbo.num_pages << PAGE_SHIFT );
dbgprintf("kernel alloc %x\n", bo->kptr );
 
pagelist = &((u32_t*)page_tabs)[(u32_t)bo->kptr >> 12];
dbgprintf("pagelist %x\n", pagelist);
radeon_gart_bind(bo->rdev, bo->tbo.offset,
bo->tbo.vm_node->size, pagelist);
bo->tbo.offset += (u64)bo->rdev->mc.gtt_location;
}
else
{
DRM_ERROR("Unknown placement %x\n", bo->domain);
bo->tbo.offset = -1;
r = -1;
};
 
if (unlikely(r != 0)) {
DRM_ERROR("radeon: failed to pin object.\n");
}
 
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = radeon_bo_gpu_offset(bo);
}
 
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p pin failed\n", bo);
return r;
};
 
int radeon_bo_unpin(struct radeon_bo *bo)
{
int r = 0;
 
if (!bo->pin_count) {
dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
if (bo->pin_count)
return 0;
 
if( bo->tbo.vm_node )
{
drm_mm_put_block(bo->tbo.vm_node);
bo->tbo.vm_node = NULL;
};
 
return r;
}
 
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
bool is_iomem;
 
if (bo->kptr) {
if (ptr) {
*ptr = bo->kptr;
}
return 0;
}
 
if(bo->domain & RADEON_GEM_DOMAIN_VRAM)
{
bo->cpu_addr = bo->rdev->mc.aper_base +
(bo->tbo.vm_node->start << PAGE_SHIFT);
bo->kptr = (void*)MapIoMem(bo->cpu_addr,
bo->tbo.vm_node->size << 12, PG_SW);
}
else
{
return -1;
}
 
if (ptr) {
*ptr = bo->kptr;
}
 
return 0;
}
 
void radeon_bo_kunmap(struct radeon_bo *bo)
{
if (bo->kptr == NULL)
return;
 
if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
{
FreeKernelSpace(bo->kptr);
}
 
bo->kptr = NULL;
 
}
 
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
 
if ((*bo) == NULL)
return;
 
*bo = NULL;
}
 
 
void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
uint32_t *tiling_flags,
uint32_t *pitch)
{
// BUG_ON(!atomic_read(&bo->tbo.reserved));
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
if (pitch)
*pitch = bo->pitch;
}
 
 
/**
* Allocate a GEM object of the specified size (no shmfs backing store in this port)
*/
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
 
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 
obj->dev = dev;
obj->size = size;
return obj;
}
 
 
int radeon_fb_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
unsigned long size, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
enum ttm_bo_type type;
 
struct radeon_bo *bo;
struct drm_mm *mman;
struct drm_mm_node *vm_node;
 
size_t num_pages;
u32 bo_domain;
int r;
 
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
if (num_pages == 0) {
dbgprintf("Illegal buffer object size.\n");
return -EINVAL;
}
 
if( (domain & RADEON_GEM_DOMAIN_VRAM) !=
RADEON_GEM_DOMAIN_VRAM )
{
return -EINVAL;
};
 
if (kernel) {
type = ttm_bo_type_kernel;
} else {
type = ttm_bo_type_device;
}
*bo_ptr = NULL;
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
 
bo->rdev = rdev;
bo->gobj = gobj;
bo->surface_reg = -1;
bo->tbo.num_pages = num_pages;
bo->domain = domain;
 
INIT_LIST_HEAD(&bo->list);
 
// radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocations are uninterruptible */
 
vm_node = kzalloc(sizeof(*vm_node),0);
 
vm_node->free = 0;
vm_node->size = 0xC00000 >> 12;
vm_node->start = 0;
vm_node->mm = NULL;
 
bo->tbo.vm_node = vm_node;
bo->tbo.offset = bo->tbo.vm_node->start << PAGE_SHIFT;
bo->tbo.offset += (u64)bo->rdev->mc.vram_location;
bo->kptr = (void*)0xFE000000;
bo->pin_count = 1;
 
*bo_ptr = bo;
 
return 0;
}
/drivers/video/drm/radeon/radeon_pm.c
44,8 → 44,11
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
 
seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
 
return 0;
}
/drivers/video/drm/radeon/radeon_ring.c
169,19 → 169,24
return 0;
/* Allocate 1M object buffer */
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
false, &rdev->ib_pool.robj);
&rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
return r;
}
r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->ib_pool.robj);
DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
return r;
}
r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
radeon_bo_unreserve(rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
return r;
207,6 → 212,8
 
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
int r;
 
if (!rdev->ib_pool.ready) {
return;
}
213,8 → 220,13
mutex_lock(&rdev->ib_pool.mutex);
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
// radeon_object_kunmap(rdev->ib_pool.robj);
// radeon_object_unref(&rdev->ib_pool.robj);
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->ib_pool.robj);
radeon_bo_unpin(rdev->ib_pool.robj);
radeon_bo_unreserve(rdev->ib_pool.robj);
}
radeon_bo_unref(&rdev->ib_pool.robj);
rdev->ib_pool.robj = NULL;
}
mutex_unlock(&rdev->ib_pool.mutex);
294,46 → 306,31
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
true,
r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
RADEON_GEM_DOMAIN_GTT,
false,
&rdev->cp.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
mutex_unlock(&rdev->cp.mutex);
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
r = radeon_object_pin(rdev->cp.ring_obj,
RADEON_GEM_DOMAIN_GTT,
r = radeon_bo_reserve(rdev->cp.ring_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->cp.gpu_addr);
if (r) {
DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
mutex_unlock(&rdev->cp.mutex);
radeon_bo_unreserve(rdev->cp.ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
r = radeon_object_kmap(rdev->cp.ring_obj,
r = radeon_bo_kmap(rdev->cp.ring_obj,
(void **)&rdev->cp.ring);
radeon_bo_unreserve(rdev->cp.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
mutex_unlock(&rdev->cp.mutex);
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
 
 
// rdev->cp.ring = CreateRingBuffer( ring_size, PG_SW );
 
dbgprintf("ring buffer %x\n", rdev->cp.ring );
 
// rdev->cp.gpu_addr = rdev->mc.gtt_location;
 
// u32_t *pagelist = &((u32_t*)page_tabs)[(u32_t)rdev->cp.ring >> 12];
 
// dbgprintf("pagelist %x\n", pagelist);
 
// radeon_gart_bind(rdev, 0, ring_size / 4096, pagelist);
 
rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
 
344,11 → 341,17
 
void radeon_ring_fini(struct radeon_device *rdev)
{
int r;
 
mutex_lock(&rdev->cp.mutex);
if (rdev->cp.ring_obj) {
// radeon_object_kunmap(rdev->cp.ring_obj);
// radeon_object_unpin(rdev->cp.ring_obj);
// radeon_object_unref(&rdev->cp.ring_obj);
r = radeon_bo_reserve(rdev->cp.ring_obj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->cp.ring_obj);
radeon_bo_unpin(rdev->cp.ring_obj);
radeon_bo_unreserve(rdev->cp.ring_obj);
}
radeon_bo_unref(&rdev->cp.ring_obj);
rdev->cp.ring = NULL;
rdev->cp.ring_obj = NULL;
}
/drivers/video/drm/radeon/radeon_ttm.c
0,0 → 1,270
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"
 
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
 
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
 
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
struct radeon_mman *mman;
struct radeon_device *rdev;
 
mman = container_of(bdev, struct radeon_mman, bdev);
rdev = container_of(mman, struct radeon_device, mman);
return rdev;
}
 
 
/*
* Global memory.
*/
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
 
static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
 
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
struct ttm_global_reference *global_ref;
int r;
 
rdev->mman.mem_global_referenced = false;
global_ref = &rdev->mman.mem_global_ref;
global_ref->global_type = TTM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &radeon_ttm_mem_global_init;
global_ref->release = &radeon_ttm_mem_global_release;
r = ttm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
return r;
}
 
rdev->mman.bo_global_ref.mem_glob =
rdev->mman.mem_global_ref.object;
global_ref = &rdev->mman.bo_global_ref.ref;
global_ref->global_type = TTM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
r = ttm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
ttm_global_item_unref(&rdev->mman.mem_global_ref);
return r;
}
 
rdev->mman.mem_global_referenced = true;
return 0;
}
 
 
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
 
 
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
struct radeon_device *rdev;
 
rdev = radeon_get_rdev(bdev);
 
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
man->gpu_offset = rdev->mc.gtt_location;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = rdev->mc.agp_base;
man->io_size = rdev->mc.gtt_size;
man->io_addr = NULL;
if (!rdev->ddev->agp->cant_use_aperture)
man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
} else
#endif
{
man->io_offset = 0;
man->io_size = 0;
man->io_addr = NULL;
}
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = rdev->mc.vram_location;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
man->io_addr = NULL;
man->io_offset = rdev->mc.aper_base;
man->io_size = rdev->mc.aper_size;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
 
static struct ttm_bo_driver radeon_bo_driver = {
// .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
// .invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
// .evict_flags = &radeon_evict_flags,
// .move = &radeon_bo_move,
// .verify_access = &radeon_verify_access,
// .sync_obj_signaled = &radeon_sync_obj_signaled,
// .sync_obj_wait = &radeon_sync_obj_wait,
// .sync_obj_flush = &radeon_sync_obj_flush,
// .sync_obj_unref = &radeon_sync_obj_unref,
// .sync_obj_ref = &radeon_sync_obj_ref,
// .move_notify = &radeon_bo_move_notify,
// .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};
 
int radeon_ttm_init(struct radeon_device *rdev)
{
int r;
 
r = radeon_ttm_global_init(rdev);
if (r) {
return r;
}
/* No other users of address space, so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev,
rdev->mman.bo_global_ref.ref.object,
&radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
rdev->need_dma32);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
rdev->mman.initialized = true;
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
rdev->mc.real_vram_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
RADEON_GEM_DOMAIN_VRAM,
&rdev->stollen_vga_memory);
if (r) {
return r;
}
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
if (r)
return r;
r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
radeon_bo_unreserve(rdev->stollen_vga_memory);
if (r) {
radeon_bo_unref(&rdev->stollen_vga_memory);
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
(unsigned)rdev->mc.real_vram_size / (1024 * 1024));
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
}
DRM_INFO("radeon: %uM of GTT memory ready.\n",
(unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
}
 
r = radeon_ttm_debugfs_init(rdev);
if (r) {
DRM_ERROR("Failed to init debugfs\n");
return r;
}
return 0;
}
 
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/drivers/video/drm/radeon/rdisplay.c
29,17 → 29,21
 
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
 
r = radeon_object_create(rdev, NULL, CURSOR_WIDTH*CURSOR_HEIGHT*4,
false,
RADEON_GEM_DOMAIN_VRAM,
false, &cursor->robj);
r = radeon_bo_create(rdev, NULL, CURSOR_WIDTH*CURSOR_HEIGHT*4,
false, RADEON_GEM_DOMAIN_VRAM, &cursor->robj);
 
if (unlikely(r != 0))
return r;
 
radeon_object_pin(cursor->robj, TTM_PL_FLAG_VRAM, NULL);
r = radeon_bo_reserve(cursor->robj, false);
if (unlikely(r != 0))
return r;
 
r = radeon_object_kmap(cursor->robj, &bits);
r = radeon_bo_pin(cursor->robj, RADEON_GEM_DOMAIN_VRAM, NULL);
if (unlikely(r != 0))
return r;
 
r = radeon_bo_kmap(cursor->robj, (void**)&bits);
if (r) {
DRM_ERROR("radeon: failed to map cursor (%d).\n", r);
return r;
57,7 → 61,7
for(i = 0; i < CURSOR_WIDTH*(CURSOR_HEIGHT-32); i++)
*bits++ = 0;
 
radeon_object_kunmap(cursor->robj);
radeon_bo_kunmap(cursor->robj);
 
// cursor->header.destroy = destroy_cursor;
 
67,7 → 71,7
void fini_cursor(cursor_t *cursor)
{
list_del(&cursor->list);
radeon_object_unpin(cursor->robj);
radeon_bo_unpin(cursor->robj);
KernelFree(cursor->data);
__DestroyObject(cursor);
};
100,7 → 104,7
old = rdisplay->cursor;
 
rdisplay->cursor = cursor;
// gpu_addr = cursor->robj->gpu_addr;
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
 
if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS, gpu_addr);
149,24 → 153,40
if (ASIC_IS_AVIVO(rdev))
{
int w = 32;
int i = 0;
 
WREG32(AVIVO_D1CUR_POSITION, (x << 16) | y);
WREG32(AVIVO_D1CUR_HOT_SPOT, (hot_x << 16) | hot_y);
WREG32(AVIVO_D1CUR_SIZE, ((w - 1) << 16) | 31);
} else {
 
uint32_t gpu_addr;
int xorg =0, yorg=0;
 
x = x - hot_x;
y = y - hot_y;
 
if( x < 0 )
{
xorg = -x + 1;
x = 0;
}
 
if( y < 0 )
{
yorg = -hot_y + 1;
y = 0;
};
 
WREG32(RADEON_CUR_HORZ_VERT_OFF,
(RADEON_CUR_LOCK | (hot_x << 16) | hot_y ));
(RADEON_CUR_LOCK | (xorg << 16) | yorg ));
WREG32(RADEON_CUR_HORZ_VERT_POSN,
(RADEON_CUR_LOCK | (x << 16) | y));
 
// gpu_addr = cursor->robj->gpu_addr;
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
 
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET,
(gpu_addr - rdev->mc.vram_location + (hot_y * 256)));
(gpu_addr - rdev->mc.vram_location + (yorg * 256)));
}
radeon_lock_cursor(false);
}
176,7 → 196,7
};
 
 
bool init_display(struct radeon_device *rdev, mode_t *usermode)
bool init_display(struct radeon_device *rdev, videomode_t *usermode)
{
struct drm_device *dev;
 
216,4 → 236,34
};
 
 
struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
{
#define BYTES_PER_LONG (BITS_PER_LONG/8)
#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
int fb_info_size = sizeof(struct fb_info);
struct fb_info *info;
char *p;
 
if (size)
fb_info_size += PADDING;
 
p = kzalloc(fb_info_size + size, GFP_KERNEL);
 
if (!p)
return NULL;
 
info = (struct fb_info *) p;
 
if (size)
info->par = p + fb_info_size;
 
return info;
#undef PADDING
#undef BYTES_PER_LONG
}
 
void framebuffer_release(struct fb_info *info)
{
kfree(info);
}
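 
Usage sketch for the pair above, with a hypothetical driver-private struct; info->par points into the padded tail of the same allocation, so a single framebuffer_release() frees both:
 
struct my_par { int id; }; /* hypothetical per-device state */
 
struct fb_info *info = framebuffer_alloc(sizeof(struct my_par), NULL);
if (info) {
struct my_par *par = info->par;
par->id = 0;
/* register and use info, then: */
framebuffer_release(info);
}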
 
/drivers/video/drm/radeon/rdisplay_kms.c
78,7 → 78,7
old = rdisplay->cursor;
 
rdisplay->cursor = cursor;
// gpu_addr = cursor->robj->gpu_addr;
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
 
if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
148,14 → 148,34
if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
y *= 2;
 
WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK | (hot_x << 16) | hot_y ));
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
uint32_t gpu_addr;
int xorg = 0, yorg = 0;
 
x = x - hot_x;
y = y - hot_y;
 
/* clip against the left/top screen edge; xorg/yorg select the
first visible column/row of the cursor image */
if (x < 0)
{
xorg = -x + 1;
x = 0;
}
 
if (y < 0)
{
yorg = -y + 1;
y = 0;
}
 
WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK | (xorg << 16) | yorg ));
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK | (x << 16) | y));
 
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
 
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
(radeon_crtc->legacy_cursor_offset + (hot_y * 256)));
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
(gpu_addr - rdev->mc.vram_location + (yorg * 256)));
}
radeon_lock_cursor_kms(crtc, false);
}
447,3 → 467,40
return err;
};
 
#if 0
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc *crtc;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
if (connector->status == connector_status_disconnected)
connector->encoder = NULL;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder_funcs = encoder->helper_private;
if (!drm_helper_encoder_in_use(encoder)) {
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
/* disconnect encoder from any connector */
encoder->crtc = NULL;
}
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
crtc->fb = NULL;
}
}
}
#endif
/drivers/video/drm/radeon/rs400.c
223,6 → 223,22
return 0;
}
 
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
 
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
tmp = RREG32(0x0150);
if (tmp & (1 << 2)) {
return 0;
}
DRM_UDELAY(1);
}
return -1;
}
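The new helper polls bit 2 of MC_STATUS (register 0x0150) once per microsecond for up to rdev->usec_timeout iterations, returning 0 as soon as the memory controller reports idle and -1 on timeout. The hunks below switch the rs400 code from the r300 helper to this chip-specific one and extend the warnings with the raw register value for easier debugging.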
 
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs400 ? */
229,9 → 245,9
r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (r300_mc_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait MC idle while "
"programming pipes. Bad things might happen.\n");
if (rs400_mc_wait_for_idle(rdev)) {
printk(KERN_WARNING "rs400: Failed to wait MC idle while "
"programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
}
}
 
370,8 → 386,8
r100_mc_stop(rdev, &save);
 
/* Wait for mc idle */
if (r300_mc_wait_for_idle(rdev))
dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
if (rs400_mc_wait_for_idle(rdev))
dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
WREG32(R_000148_MC_FB_LOCATION,
S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
/drivers/video/drm/radeon/rs600.c
272,10 → 272,14
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
if (rdev->gart.table.vram.robj) {
// radeon_object_kunmap(rdev->gart.table.vram.robj);
// radeon_object_unpin(rdev->gart.table.vram.robj);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (r == 0) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
}
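The same reserve → kunmap/unpin → unreserve sequence replaces the commented-out radeon_object_* calls both here and in rv770_pcie_gart_disable() below; a buffer must hold its reservation while being unmapped and unpinned. A hypothetical helper capturing the idiom (the name is illustrative only):
 
static void radeon_bo_kunmap_unpin(struct radeon_bo *robj)
{
    int r;
 
    r = radeon_bo_reserve(robj, false);
    if (likely(r == 0)) {
        radeon_bo_kunmap(robj);      /* drop the CPU mapping */
        radeon_bo_unpin(robj);       /* allow the bo to move again */
        radeon_bo_unreserve(robj);
    }
}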
 
void rs600_gart_fini(struct radeon_device *rdev)
{
/drivers/video/drm/radeon/rv770.c
113,15 → 113,19
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
// radeon_object_kunmap(rdev->gart.table.vram.robj);
// radeon_object_unpin(rdev->gart.table.vram.robj);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
}
 
void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
rv770_pcie_gart_disable(rdev);
// radeon_gart_table_vram_free(rdev);
radeon_gart_table_vram_free(rdev);
radeon_gart_fini(rdev);
}
 
877,6 → 881,7
}
rv770_gpu_init(rdev);
 
 
// r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
// &rdev->r600_blit.shader_gpu_addr);
// if (r) {
/drivers/video/drm/ttm/ttm_bo.c
0,0 → 1,141
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/* Notes:
*
* We store the bo pointer in the drm_mm_node struct so we know which bo
* owns a specific node. There is no protection on the pointer, thus to make
* sure things don't go berserk you have to access this pointer while
* holding the global lru lock and make sure anytime you free a node you
* reset the pointer to NULL.
*/
 
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/module.h>
 
 
 
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_size)
{
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
 
if (type >= TTM_NUM_MEM_TYPES) {
printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
return ret;
}
 
man = &bdev->man[type];
if (man->has_type) {
printk(KERN_ERR TTM_PFX
"Memory manager already initialized for type %d\n",
type);
return ret;
}
 
ret = bdev->driver->init_mem_type(bdev, type, man);
if (ret)
return ret;
 
ret = 0;
if (type != TTM_PL_SYSTEM) {
if (!p_size) {
printk(KERN_ERR TTM_PFX
"Zero size memory manager type %d\n",
type);
return -EINVAL; /* a zero-size manager is an error, not success */
}
ret = drm_mm_init(&man->manager, 0, p_size);
if (ret)
return ret;
}
man->has_type = true;
man->use_type = true;
man->size = p_size;
 
INIT_LIST_HEAD(&man->lru);
 
return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
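A sketch of how a driver typically brings up its fixed memory managers with this function, following the pattern radeon's TTM setup uses; the rdev fields and sizes are assumptions here, and TTM_PL_SYSTEM needs no size since p_size is ignored for it:
 
/* illustrative: bring up VRAM and GTT heaps, sizes in pages */
int r;
 
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
                   rdev->mc.real_vram_size >> PAGE_SHIFT);
if (r) {
    DRM_ERROR("Failed initializing VRAM heap.\n");
    return r;
}
 
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
                   rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
    DRM_ERROR("Failed initializing GTT heap.\n");
    return r;
}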
 
int ttm_bo_global_init(struct ttm_global_reference *ref)
{
struct ttm_bo_global_ref *bo_ref =
container_of(ref, struct ttm_bo_global_ref, ref);
struct ttm_bo_global *glob = ref->object;
int ret;
 
// mutex_init(&glob->device_list_mutex);
// spin_lock_init(&glob->lru_lock);
glob->mem_glob = bo_ref->mem_glob;
// glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
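/* NOTE: with alloc_page() stubbed out above, dummy_read_page stays NULL
in this port, so the check below always takes the -ENOMEM path */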
 
if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM;
goto out_no_drp;
}
 
INIT_LIST_HEAD(&glob->swap_lru);
INIT_LIST_HEAD(&glob->device_list);
 
// ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
if (unlikely(ret != 0)) {
printk(KERN_ERR TTM_PFX
"Could not register buffer object swapout.\n");
goto out_no_shrink;
}
 
glob->ttm_bo_extra_size =
ttm_round_pot(sizeof(struct ttm_tt)) +
ttm_round_pot(sizeof(struct ttm_backend));
 
glob->ttm_bo_size = glob->ttm_bo_extra_size +
ttm_round_pot(sizeof(struct ttm_buffer_object));
 
atomic_set(&glob->bo_count, 0);
 
// kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
// ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
// if (unlikely(ret != 0))
// kobject_put(&glob->kobj);
return ret;
out_no_shrink:
__free_page(glob->dummy_read_page);
out_no_drp:
kfree(glob);
return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
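This init function is not called directly; it is installed as the init callback of a ttm_global_reference and invoked through ttm_global_item_ref() (see ttm_global.c below). A sketch of the usual hookup, with illustrative names; the mem_glob pointer is assumed to come from a TTM_GLOBAL_TTM_MEM reference taken first, and ttm_bo_global_release() is the matching release callback from the same file (not shown in this diff):
 
static struct ttm_bo_global_ref bo_ref;   /* illustrative static instance */
 
static int my_bo_global_init(struct ttm_mem_global *mem_glob)
{
    struct ttm_global_reference *ref = &bo_ref.ref;
 
    bo_ref.mem_glob = mem_glob;           /* consumed by ttm_bo_global_init() */
    ref->global_type = TTM_GLOBAL_TTM_BO;
    ref->size = sizeof(struct ttm_bo_global);
    ref->init = &ttm_bo_global_init;
    ref->release = &ttm_bo_global_release;
    return ttm_global_item_ref(ref);
}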
 
 
/drivers/video/drm/ttm/ttm_global.c
0,0 → 1,114
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
 
#include "ttm/ttm_module.h"
//#include <linux/mutex.h>
//#include <linux/slab.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/module.h>
 
struct ttm_global_item {
// struct mutex mutex;
void *object;
int refcount;
};
 
static struct ttm_global_item glob[TTM_GLOBAL_NUM];
 
void ttm_global_init(void)
{
int i;
 
for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
struct ttm_global_item *item = &glob[i];
// mutex_init(&item->mutex);
item->object = NULL;
item->refcount = 0;
}
}
 
void ttm_global_release(void)
{
int i;
for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
struct ttm_global_item *item = &glob[i];
BUG_ON(item->object != NULL);
BUG_ON(item->refcount != 0);
}
}
 
int ttm_global_item_ref(struct ttm_global_reference *ref)
{
int ret;
struct ttm_global_item *item = &glob[ref->global_type];
void *object;
 
// mutex_lock(&item->mutex);
if (item->refcount == 0) {
item->object = kzalloc(ref->size, GFP_KERNEL);
if (unlikely(item->object == NULL)) {
ret = -ENOMEM;
goto out_err;
}
 
ref->object = item->object;
ret = ref->init(ref);
if (unlikely(ret != 0))
goto out_err;
 
}
++item->refcount;
ref->object = item->object;
object = item->object;
// mutex_unlock(&item->mutex);
return 0;
out_err:
// mutex_unlock(&item->mutex);
kfree(item->object); /* kfree(NULL) is safe; frees a partially initialized object */
item->object = NULL;
return ret;
}
EXPORT_SYMBOL(ttm_global_item_ref);
 
void ttm_global_item_unref(struct ttm_global_reference *ref)
{
struct ttm_global_item *item = &glob[ref->global_type];
 
// mutex_lock(&item->mutex);
BUG_ON(item->refcount == 0);
BUG_ON(ref->object != item->object);
if (--item->refcount == 0) {
ref->release(ref);
item->object = NULL;
}
// mutex_unlock(&item->mutex);
}
EXPORT_SYMBOL(ttm_global_item_unref);
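For completeness, a condensed sketch of the caller side of this reference-counting API, mirroring how drivers of this era take the TTM_GLOBAL_TTM_MEM reference; my_ttm_global_init() and my_ttm_global_release() are illustrative names:
 
static struct ttm_global_reference mem_glob_ref;  /* illustrative */
 
static int my_ttm_global_init(void)
{
    int r;
 
    mem_glob_ref.global_type = TTM_GLOBAL_TTM_MEM;
    mem_glob_ref.size = sizeof(struct ttm_mem_global);
    mem_glob_ref.init = &ttm_mem_global_init;
    mem_glob_ref.release = &ttm_mem_global_release;
    r = ttm_global_item_ref(&mem_glob_ref);
    if (r != 0)
        printk(KERN_ERR "Failed setting up TTM memory accounting.\n");
    return r;
}
 
static void my_ttm_global_release(void)
{
    ttm_global_item_unref(&mem_glob_ref);  /* refcount hits 0 -> release() runs */
}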