/drivers/video/drm/drm_crtc.c |
---|
222,6 → 222,7 |
obj->id = new_id; |
obj->type = obj_type; |
return 0; |
} |
361,6 → 362,8 |
void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, |
const struct drm_crtc_funcs *funcs) |
{ |
ENTRY(); |
crtc->dev = dev; |
crtc->funcs = funcs; |
369,7 → 372,10 |
list_add_tail(&crtc->head, &dev->mode_config.crtc_list); |
dev->mode_config.num_crtc++; |
// mutex_unlock(&dev->mode_config.mutex); |
LEAVE(); |
} |
EXPORT_SYMBOL(drm_crtc_init); |
586,6 → 592,7 |
struct drm_property *dpms; |
int i; |
ENTRY(); |
/* |
* Standard properties (apply to all connectors) |
*/ |
601,6 → 608,7 |
drm_dpms_enum_list[i].name); |
dev->mode_config.dpms_property = dpms; |
LEAVE(); |
return 0; |
} |
794,6 → 802,8 |
*/ |
void drm_mode_config_init(struct drm_device *dev) |
{ |
ENTRY(); |
// mutex_init(&dev->mode_config.mutex); |
// mutex_init(&dev->mode_config.idr_mutex); |
INIT_LIST_HEAD(&dev->mode_config.fb_list); |
803,6 → 813,7 |
INIT_LIST_HEAD(&dev->mode_config.encoder_list); |
INIT_LIST_HEAD(&dev->mode_config.property_list); |
INIT_LIST_HEAD(&dev->mode_config.property_blob_list); |
idr_init(&dev->mode_config.crtc_idr); |
// mutex_lock(&dev->mode_config.mutex); |
814,6 → 825,9 |
dev->mode_config.num_connector = 0; |
dev->mode_config.num_crtc = 0; |
dev->mode_config.num_encoder = 0; |
LEAVE(); |
} |
EXPORT_SYMBOL(drm_mode_config_init); |
1946,6 → 1960,7 |
} |
drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); |
property->flags = flags; |
property->num_values = num_values; |
INIT_LIST_HEAD(&property->enum_blob_list); |
1953,7 → 1968,11 |
if (name) |
strncpy(property->name, name, DRM_PROP_NAME_LEN); |
list_add_tail(&property->head, &dev->mode_config.property_list); |
dbgprintf("%s %x name %s\n", __FUNCTION__, property, name); |
return property; |
fail: |
kfree(property); |
/drivers/video/drm/drm_crtc_helper.c |
---|
479,6 → 479,8 |
drm_pick_crtcs(dev, crtcs, modes, 0, width, height); |
dbgprintf("done\n"); |
i = 0; |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
struct drm_display_mode *mode = modes[i]; |
502,6 → 504,8 |
kfree(crtcs); |
kfree(modes); |
kfree(enabled); |
LEAVE(); |
} |
/** |
518,7 → 522,7 |
struct drm_crtc *tmp; |
int crtc_mask = 1; |
WARN(!crtc, "checking null crtc?"); |
// WARN(!crtc, "checking null crtc?"); |
dev = crtc->dev; |
909,7 → 913,7 |
drm_setup_crtcs(dev); |
/* alert the driver fb layer */ |
dev->mode_config.funcs->fb_changed(dev); |
// dev->mode_config.funcs->fb_changed(dev); |
/* FIXME: send hotplug event */ |
return true; |
933,6 → 937,8 |
struct drm_connector *connector; |
int count = 0; |
ENTRY(); |
count = drm_helper_probe_connector_modes(dev, |
dev->mode_config.max_width, |
dev->mode_config.max_height); |
952,8 → 958,10 |
drm_setup_crtcs(dev); |
/* alert the driver fb layer */ |
dev->mode_config.funcs->fb_changed(dev); |
// dev->mode_config.funcs->fb_changed(dev); |
LEAVE(); |
return 0; |
} |
EXPORT_SYMBOL(drm_helper_initial_config); |
/drivers/video/drm/drm_edid.c |
---|
27,8 → 27,12 |
* DEALINGS IN THE SOFTWARE. |
*/ |
//#include <linux/kernel.h> |
//#include <linux/i2c.h> |
//#include <linux/i2c-algo-bit.h> |
#include <types.h> |
#include <list.h> |
#include <linux/idr.h> |
#include <linux/i2c.h> |
#include <linux/i2c-algo-bit.h> |
#include "drmP.h" |
#include "drm_edid.h" |
/drivers/video/drm/i2c/i2c-algo-bit.c |
---|
0,0 → 1,615 |
/* ------------------------------------------------------------------------- |
* i2c-algo-bit.c i2c driver algorithms for bit-shift adapters |
* ------------------------------------------------------------------------- |
* Copyright (C) 1995-2000 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
* ------------------------------------------------------------------------- */ |
/* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki |
<kmalkki@cc.hut.fi> and Jean Delvare <khali@linux-fr.org> */ |
#include <types.h> |
#include <list.h> |
#include <syscall.h> |
#include <errno.h> |
#include <linux/i2c.h> |
#include <linux/i2c-algo-bit.h> |
/* ----- global defines ----------------------------------------------- */ |
#ifdef DEBUG |
#define bit_dbg(level, dev, format, args...) \ |
do { \ |
if (i2c_debug >= level) \ |
dev_dbg(dev, format, ##args); \ |
} while (0) |
#else |
#define bit_dbg(level, dev, format, args...) \ |
do {} while (0) |
#endif /* DEBUG */ |
/* ----- global variables --------------------------------------------- */ |
static int bit_test; /* see if the line-setting functions work */ |
/* --- setting states on the bus with the right timing: --------------- */ |
#define setsda(adap, val) adap->setsda(adap->data, val) |
#define setscl(adap, val) adap->setscl(adap->data, val) |
#define getsda(adap) adap->getsda(adap->data) |
#define getscl(adap) adap->getscl(adap->data) |
/* Pull SDA low and wait half a bit period so the level settles. */
static inline void sdalo(struct i2c_algo_bit_data *adap)
{
	setsda(adap, 0);
	udelay((adap->udelay + 1) / 2);
}
/* Release SDA (let the pull-up take it high) and wait half a bit period. */
static inline void sdahi(struct i2c_algo_bit_data *adap)
{
	setsda(adap, 1);
	udelay((adap->udelay + 1) / 2);
}
/* Drive SCL low and wait half a bit period.  There is no "scl high"
 * counterpart here: raising SCL must go through sclhi(), which also
 * copes with clock stretching. */
static inline void scllo(struct i2c_algo_bit_data *adap)
{
	setscl(adap, 0);
	udelay(adap->udelay / 2);
}
/* |
* Raise scl line, and do checking for delays. This is necessary for slower |
* devices. |
*/ |
static int sclhi(struct i2c_algo_bit_data *adap) |
{ |
unsigned long start; |
setscl(adap, 1); |
/* Not all adapters have scl sense line... */ |
if (!adap->getscl) |
goto done; |
// start = jiffies; |
while (!getscl(adap)) { |
/* This hw knows how to read the clock line, so we wait |
* until it actually gets high. This is safer as some |
* chips may hold it low ("clock stretching") while they |
* are processing data internally. |
*/ |
// if (time_after(jiffies, start + adap->timeout)) |
// return -ETIMEDOUT; |
udelay(adap->udelay); |
// cond_resched(); |
} |
#ifdef DEBUG |
if (jiffies != start && i2c_debug >= 3) |
pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go " |
"high\n", jiffies - start); |
#endif |
done: |
udelay(adap->udelay); |
return 0; |
} |
/* --- other auxiliary functions -------------------------------------- */ |
/* Generate a START condition: SDA falls while SCL is high, then SCL
 * is dropped so data bits can be placed on the bus. */
static void i2c_start(struct i2c_algo_bit_data *adap)
{
	/* assert: scl, sda are high */
	setsda(adap, 0);
	udelay(adap->udelay);
	scllo(adap);
}
/* Generate a repeated START: first bring both lines back high
 * (SDA, then SCL), then issue the usual START sequence without an
 * intervening STOP. */
static void i2c_repstart(struct i2c_algo_bit_data *adap)
{
	/* assert: scl is low */
	sdahi(adap);
	sclhi(adap);
	setsda(adap, 0);
	udelay(adap->udelay);
	scllo(adap);
}
/* Generate a STOP condition: with SDA held low, release SCL, then
 * let SDA rise while SCL is high. */
static void i2c_stop(struct i2c_algo_bit_data *adap)
{
	/* assert: scl is low */
	sdalo(adap);
	sclhi(adap);
	setsda(adap, 1);
	udelay(adap->udelay);
}
/* send a byte without start cond., look for arbitration,
   check ackn. from slave */
/* returns:
 * 1 if the device acknowledged
 * 0 if the device did not ack
 * -ETIMEDOUT if an error occurred (while raising the scl line)
 */
static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
{
	int i;
	int sb;
	int ack;
	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;

	/* assert: scl is low */
	/* Shift the byte out MSB first: put each bit on SDA while SCL is
	 * low, then clock SCL high and low around it. */
	for (i = 7; i >= 0; i--) {
		sb = (c >> i) & 1;
		setsda(adap, sb);
		udelay((adap->udelay + 1) / 2);
		if (sclhi(adap) < 0) { /* timed out */
//			bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
//				"timeout at bit #%d\n", (int)c, i);
			return -ETIMEDOUT;
		}
		/* FIXME do arbitration here:
		 * if (sb && !getsda(adap)) -> ouch! Get out of here.
		 *
		 * Report a unique code, so higher level code can retry
		 * the whole (combined) message and *NOT* issue STOP.
		 */
		scllo(adap);
	}
	/* Release SDA and clock once more so the slave can drive the
	 * ACK/NAK bit. */
	sdahi(adap);
	if (sclhi(adap) < 0) { /* timeout */
//		bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
//			"timeout at ack\n", (int)c);
		return -ETIMEDOUT;
	}

	/* read ack: SDA should be pulled down by slave, or it may
	 * NAK (usually to report problems with the data we wrote).
	 */
	ack = !getsda(adap);	/* ack: sda is pulled low -> success */
//	bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c,
//		ack ? "A" : "NA");

	scllo(adap);
	return ack;
	/* assert: scl is low (sda undef) */
}
/*
 * Read one byte from the bus, MSB first, without any start/stop
 * sequence; the ACK/NAK bit is sent separately by the caller (see
 * readbytes()/acknak()).  Returns the byte (0..255) or -ETIMEDOUT
 * if SCL stayed stretched.
 */
static int i2c_inb(struct i2c_adapter *i2c_adap)
{
	/* read byte via i2c port, without start/stop sequence */
	/* acknowledge is sent in i2c_read. */
	int i;
	unsigned char indata = 0;
	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;

	/* assert: scl is low */
	/* Release SDA so the slave can drive it. */
	sdahi(adap);
	for (i = 0; i < 8; i++) {
		if (sclhi(adap) < 0) { /* timeout */
			bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit "
				"#%d\n", 7 - i);
			return -ETIMEDOUT;
		}
		indata *= 2;		/* shift accumulated bits up... */
		if (getsda(adap))
			indata |= 0x01;	/* ...and sample the new LSB */
		setscl(adap, 0);
		/* half delay before reading the ack/nak, full otherwise */
		udelay(i == 7 ? adap->udelay / 2 : adap->udelay);
	}
	/* assert: scl is low */
	return indata;
}
/*
 * Sanity check for the adapter hardware - check the reaction of
 * the bus lines only if it seems to be idle.
 *
 * Wiggles SDA and SCL one at a time and verifies the other line does
 * not follow (detects shorts / stuck lines).  Returns 0 if the bus
 * behaves, -ENODEV otherwise (with both lines released on exit).
 * NOTE(review): currently unreferenced -- its caller in
 * i2c_bit_prepare_bus() is ported out.
 */
static int test_bus(struct i2c_algo_bit_data *adap, char *name)
{
	int scl, sda;

	if (adap->getscl == NULL)
		pr_info("%s: Testing SDA only, SCL is not readable\n", name);

	/* Both lines must idle high (pulled up) before we start. */
	sda = getsda(adap);
	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
	if (!scl || !sda) {
		printk(KERN_WARNING "%s: bus seems to be busy\n", name);
		goto bailout;
	}

	/* Drive SDA low: SDA must follow, SCL must stay high. */
	sdalo(adap);
	sda = getsda(adap);
	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
	if (sda) {
		printk(KERN_WARNING "%s: SDA stuck high!\n", name);
		goto bailout;
	}
	if (!scl) {
		printk(KERN_WARNING "%s: SCL unexpected low "
		       "while pulling SDA low!\n", name);
		goto bailout;
	}

	/* Release SDA: it must return high, SCL untouched. */
	sdahi(adap);
	sda = getsda(adap);
	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
	if (!sda) {
		printk(KERN_WARNING "%s: SDA stuck low!\n", name);
		goto bailout;
	}
	if (!scl) {
		printk(KERN_WARNING "%s: SCL unexpected low "
		       "while pulling SDA high!\n", name);
		goto bailout;
	}

	/* Same dance for SCL: drive it low... */
	scllo(adap);
	sda = getsda(adap);
	scl = (adap->getscl == NULL) ? 0 : getscl(adap);
	if (scl) {
		printk(KERN_WARNING "%s: SCL stuck high!\n", name);
		goto bailout;
	}
	if (!sda) {
		printk(KERN_WARNING "%s: SDA unexpected low "
		       "while pulling SCL low!\n", name);
		goto bailout;
	}

	/* ...and release it. */
	sclhi(adap);
	sda = getsda(adap);
	scl = (adap->getscl == NULL) ? 1 : getscl(adap);
	if (!scl) {
		printk(KERN_WARNING "%s: SCL stuck low!\n", name);
		goto bailout;
	}
	if (!sda) {
		printk(KERN_WARNING "%s: SDA unexpected low "
		       "while pulling SCL high!\n", name);
		goto bailout;
	}
	pr_info("%s: Test OK\n", name);
	return 0;
bailout:
	/* leave both lines released (high) on failure */
	sdahi(adap);
	sclhi(adap);
	return -ENODEV;
}
/* ----- Utility functions |
*/ |
/* try_address tries to contact a chip for a number of
 * times before it gives up.
 * return values:
 * 1 chip answered
 * 0 chip did not answer
 * -x transmission error
 *
 * @addr is the already-shifted address byte (R/W bit in bit 0).
 */
static int try_address(struct i2c_adapter *i2c_adap,
		       unsigned char addr, int retries)
{
	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
	int i, ret = 0;

	for (i = 0; i <= retries; i++) {
		ret = i2c_outb(i2c_adap, addr);
		if (ret == 1 || i == retries)
			break;
		/* No ACK: issue a STOP, pause, and retry with a fresh
		 * START condition. */
		bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n");
		i2c_stop(adap);
		udelay(adap->udelay);
//		yield();
		bit_dbg(3, &i2c_adap->dev, "emitting start condition\n");
		i2c_start(adap);
	}
	if (i && ret)
		bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at "
			"0x%02x: %s\n", i + 1,
			addr & 1 ? "read from" : "write to", addr >> 1,
			ret == 1 ? "success" : "failed, timeout?");
	return ret;
}
static int sendbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) |
{ |
const unsigned char *temp = msg->buf; |
int count = msg->len; |
unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK; |
int retval; |
int wrcount = 0; |
while (count > 0) { |
retval = i2c_outb(i2c_adap, *temp); |
/* OK/ACK; or ignored NAK */ |
if ((retval > 0) || (nak_ok && (retval == 0))) { |
count--; |
temp++; |
wrcount++; |
/* A slave NAKing the master means the slave didn't like |
* something about the data it saw. For example, maybe |
* the SMBus PEC was wrong. |
*/ |
} else if (retval == 0) { |
// dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n"); |
return -EIO; |
/* Timeout; or (someday) lost arbitration |
* |
* FIXME Lost ARB implies retrying the transaction from |
* the first message, after the "winning" master issues |
* its STOP. As a rule, upper layer code has no reason |
* to know or care about this ... it is *NOT* an error. |
*/ |
} else { |
// dev_err(&i2c_adap->dev, "sendbytes: error %d\n", |
// retval); |
return retval; |
} |
} |
return wrcount; |
} |
/*
 * Drive the ACK/NAK bit after a byte has been received: pull SDA low
 * for an ACK (@is_ack != 0), leave it high for a NAK, then clock SCL
 * once.  Always returns 0.
 *
 * NOTE(review): the -ETIMEDOUT return on a stretched clock is ported
 * out, so a timeout here is silently reported as success -- confirm
 * that is intended.
 */
static int acknak(struct i2c_adapter *i2c_adap, int is_ack)
{
	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;

	/* assert: sda is high */
	if (is_ack)		/* send ack */
		setsda(adap, 0);
	udelay((adap->udelay + 1) / 2);
	if (sclhi(adap) < 0) {	/* timeout */
//		dev_err(&i2c_adap->dev, "readbytes: ack/nak timeout\n");
//		return -ETIMEDOUT;
	}
	scllo(adap);
	return 0;
}
/*
 * Read msg->len bytes from the slave into msg->buf, ACKing every byte
 * except the last (which is NAKed to end the transfer).  Handles the
 * SMBus I2C_M_RECV_LEN case where the first received byte is the block
 * length.  Returns the number of bytes read, or a negative errno.
 */
static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
{
	int inval;
	int rdcount = 0;	/* counts bytes read */
	unsigned char *temp = msg->buf;
	int count = msg->len;
	const unsigned flags = msg->flags;

	while (count > 0) {
		inval = i2c_inb(i2c_adap);
		if (inval >= 0) {
			*temp = inval;
			rdcount++;
		} else {   /* read timed out */
			break;
		}

		temp++;
		count--;

		/* Some SMBus transactions require that we receive the
		   transaction length as the first read byte. */
		if (rdcount == 1 && (flags & I2C_M_RECV_LEN)) {
			if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
				/* reject the byte with a NAK before bailing */
				if (!(flags & I2C_M_NO_RD_ACK))
					acknak(i2c_adap, 0);
//				dev_err(&i2c_adap->dev, "readbytes: invalid "
//					"block length (%d)\n", inval);
				return -EREMOTEIO;
			}
			/* The original count value accounts for the extra
			   bytes, that is, either 1 for a regular transaction,
			   or 2 for a PEC transaction. */
			count += inval;
			msg->len += inval;
		}

//		bit_dbg(2, &i2c_adap->dev, "readbytes: 0x%02x %s\n",
//			inval,
//			(flags & I2C_M_NO_RD_ACK)
//				? "(no ack/nak)"
//				: (count ? "A" : "NA"));

		/* ACK while more bytes are expected (count != 0);
		 * NAK the final byte. */
		if (!(flags & I2C_M_NO_RD_ACK)) {
			inval = acknak(i2c_adap, count);
			if (inval < 0)
				return inval;
		}
	}
	return rdcount;
}
/* doAddress initiates the transfer by generating the start condition (in
 * try_address) and transmits the address in the necessary format to handle
 * reads, writes as well as 10bit-addresses.
 * returns:
 * 0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set
 * -x an error occurred (like: -EREMOTEIO if the device did not answer, or
 *	-ETIMEDOUT, for example if the lines are stuck...)
 */
static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
{
	unsigned short flags = msg->flags;
	unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK;
	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;

	unsigned char addr;
	int ret, retries;

	/* with IGNORE_NAK there is no point retrying the address byte */
	retries = nak_ok ? 0 : i2c_adap->retries;

	if (flags & I2C_M_TEN) {
		/* a ten bit address: 11110aa0 header first (aa = addr
		 * bits 9:8), then the low 8 address bits */
		addr = 0xf0 | ((msg->addr >> 7) & 0x03);
		bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
		/* try extended address code...*/
		ret = try_address(i2c_adap, addr, retries);
		if ((ret != 1) && !nak_ok)  {
//			dev_err(&i2c_adap->dev,
//				"died at extended address code\n");
			return -EREMOTEIO;
		}
		/* the remaining 8 bit address */
		ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
		if ((ret != 1) && !nak_ok) {
			/* the chip did not ack / xmission error occurred */
//			dev_err(&i2c_adap->dev, "died at 2nd address code\n");
			return -EREMOTEIO;
		}
		if (flags & I2C_M_RD) {
			/* 10-bit reads: repeated START, then resend the
			 * header with the read bit set */
			bit_dbg(3, &i2c_adap->dev, "emitting repeated "
				"start condition\n");
			i2c_repstart(adap);
			/* okay, now switch into reading mode */
			addr |= 0x01;
			ret = try_address(i2c_adap, addr, retries);
			if ((ret != 1) && !nak_ok) {
//				dev_err(&i2c_adap->dev,
//					"died at repeated address code\n");
				return -EREMOTEIO;
			}
		}
	} else {		/* normal 7bit address	*/
		addr = msg->addr << 1;
		if (flags & I2C_M_RD)
			addr |= 1;
		if (flags & I2C_M_REV_DIR_ADDR)
			addr ^= 1;
		ret = try_address(i2c_adap, addr, retries);
		if ((ret != 1) && !nak_ok)
			return -ENXIO;
	}

	return 0;
}
/*
 * Master transfer entry point for the bit-bang algorithm: runs @num
 * messages as one combined transaction (START ... repeated START ...
 * STOP).  Returns the number of messages completed, or a negative
 * errno; a STOP is always emitted on exit, success or failure.
 */
static int bit_xfer(struct i2c_adapter *i2c_adap,
		    struct i2c_msg msgs[], int num)
{
	struct i2c_msg *pmsg;
	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
	int i, ret;
	unsigned short nak_ok;

	bit_dbg(3, &i2c_adap->dev, "emitting start condition\n");
	i2c_start(adap);
	for (i = 0; i < num; i++) {
		pmsg = &msgs[i];
		nak_ok = pmsg->flags & I2C_M_IGNORE_NAK;
		if (!(pmsg->flags & I2C_M_NOSTART)) {
			/* every message after the first begins with a
			 * repeated START */
			if (i) {
				bit_dbg(3, &i2c_adap->dev, "emitting "
					"repeated start condition\n");
				i2c_repstart(adap);
			}
			ret = bit_doAddress(i2c_adap, pmsg);
			if ((ret != 0) && !nak_ok) {
				bit_dbg(1, &i2c_adap->dev, "NAK from "
					"device addr 0x%02x msg #%d\n",
					msgs[i].addr, i);
				goto bailout;
			}
		}
		if (pmsg->flags & I2C_M_RD) {
			/* read bytes into buffer*/
			ret = readbytes(i2c_adap, pmsg);
			if (ret >= 1)
				bit_dbg(2, &i2c_adap->dev, "read %d byte%s\n",
					ret, ret == 1 ? "" : "s");
			/* a short read is an error */
			if (ret < pmsg->len) {
				if (ret >= 0)
					ret = -EREMOTEIO;
				goto bailout;
			}
		} else {
			/* write bytes from buffer */
			ret = sendbytes(i2c_adap, pmsg);
			if (ret >= 1)
				bit_dbg(2, &i2c_adap->dev, "wrote %d byte%s\n",
					ret, ret == 1 ? "" : "s");
			/* a short write is an error */
			if (ret < pmsg->len) {
				if (ret >= 0)
					ret = -EREMOTEIO;
				goto bailout;
			}
		}
	}
	ret = i;

bailout:
	bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n");
	i2c_stop(adap);
	return ret;
}
static u32 bit_func(struct i2c_adapter *adap) |
{ |
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | |
I2C_FUNC_SMBUS_READ_BLOCK_DATA | |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL | |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; |
} |
/* -----exported algorithm data: ------------------------------------- */
/* Algorithm vtable handed to the i2c core: bit-banged master transfers
 * plus the functionality bitmask above. */
static const struct i2c_algorithm i2c_bit_algo = {
	.master_xfer	= bit_xfer,
	.functionality	= bit_func,
};
/* |
* registering functions to load algorithms at runtime |
*/ |
static int i2c_bit_prepare_bus(struct i2c_adapter *adap) |
{ |
struct i2c_algo_bit_data *bit_adap = adap->algo_data; |
// if (bit_test) { |
// int ret = test_bus(bit_adap, adap->name); |
// if (ret < 0) |
// return -ENODEV; |
// } |
/* register new adapter to i2c module... */ |
adap->algo = &i2c_bit_algo; |
adap->retries = 3; |
return 0; |
} |
/* Public entry point: hook the bit-bang algorithm onto @adap.
 * Adapter registration with the i2c core is ported out. */
int i2c_bit_add_bus(struct i2c_adapter *adap)
{
	int err = i2c_bit_prepare_bus(adap);

	if (err != 0)
		return err;

	return 0; //i2c_add_adapter(adap);
}
/drivers/video/drm/i2c/i2c-core.c |
---|
0,0 → 1,108 |
/* i2c-core.c - a device driver for the iic-bus interface */ |
/* ------------------------------------------------------------------------- */ |
/* Copyright (C) 1995-99 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
/* ------------------------------------------------------------------------- */ |
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>. |
All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl> |
SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and |
Jean Delvare <khali@linux-fr.org> */ |
#include <types.h> |
#include <list.h> |
#include <errno.h> |
#include <linux/i2c.h> |
#include <syscall.h> |
/** |
* i2c_transfer - execute a single or combined I2C message |
* @adap: Handle to I2C bus |
* @msgs: One or more messages to execute before STOP is issued to |
* terminate the operation; each message begins with a START. |
* @num: Number of messages to be executed. |
* |
* Returns negative errno, else the number of messages executed. |
* |
* Note that there is no requirement that each message be sent to |
* the same slave address, although that is the most common model. |
*/ |
int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) |
{ |
unsigned long orig_jiffies; |
int ret, try; |
/* REVISIT the fault reporting model here is weak: |
* |
* - When we get an error after receiving N bytes from a slave, |
* there is no way to report "N". |
* |
* - When we get a NAK after transmitting N bytes to a slave, |
* there is no way to report "N" ... or to let the master |
* continue executing the rest of this combined message, if |
* that's the appropriate response. |
* |
* - When for example "num" is two and we successfully complete |
* the first message but get an error part way through the |
* second, it's unclear whether that should be reported as |
* one (discarding status on the second message) or errno |
* (discarding status on the first one). |
*/ |
if (adap->algo->master_xfer) { |
#ifdef DEBUG |
for (ret = 0; ret < num; ret++) { |
dev_dbg(&adap->dev, "master_xfer[%d] %c, addr=0x%02x, " |
"len=%d%s\n", ret, (msgs[ret].flags & I2C_M_RD) |
? 'R' : 'W', msgs[ret].addr, msgs[ret].len, |
(msgs[ret].flags & I2C_M_RECV_LEN) ? "+" : ""); |
} |
#endif |
// if (in_atomic() || irqs_disabled()) { |
// ret = mutex_trylock(&adap->bus_lock); |
// if (!ret) |
// /* I2C activity is ongoing. */ |
// return -EAGAIN; |
// } else { |
// mutex_lock_nested(&adap->bus_lock, adap->level); |
// } |
/* Retry automatically on arbitration loss */ |
// orig_jiffies = jiffies; |
for (ret = 0, try = 0; try <= adap->retries; try++) { |
ret = adap->algo->master_xfer(adap, msgs, num); |
if (ret != -EAGAIN) |
break; |
// if (time_after(jiffies, orig_jiffies + adap->timeout)) |
// break; |
delay(1); |
} |
// mutex_unlock(&adap->bus_lock); |
return ret; |
} else { |
// dev_dbg(&adap->dev, "I2C level transfers not supported\n"); |
return -EOPNOTSUPP; |
} |
} |
EXPORT_SYMBOL(i2c_transfer); |
/drivers/video/drm/idr.c |
---|
0,0 → 1,1064 |
/* |
* 2002-10-18 written by Jim Houston jim.houston@ccur.com |
* Copyright (C) 2002 by Concurrent Computer Corporation |
* Distributed under the GNU GPL license version 2. |
* |
* Modified by George Anzinger to reuse immediately and to use |
* find bit instructions. Also removed _irq on spinlocks. |
* |
* Modified by Nadia Derbey to make it RCU safe. |
* |
* Small id to pointer translation service. |
* |
* It uses a radix tree like structure as a sparse array indexed |
* by the id to obtain the pointer. The bitmap makes allocating |
* a new id quick. |
* |
* You call it to allocate an id (an int) an associate with that id a |
* pointer or what ever, we treat it as a (void *). You can pass this |
* id to a user for him to pass back at a later time. You then pass |
* that id to this code and it returns your pointer. |
* You can release ids at any time. When all ids are released, most of |
* the memory is returned (we keep IDR_FREE_MAX) in a local pool so we |
* don't need to go to the memory "store" during an id allocate, just |
* so you don't need to be too concerned about locking and conflicts |
* with the slab allocator. |
*/ |
#include <linux/idr.h> |
#define ADDR "=m" (*(volatile long *) addr) |
static inline void __set_bit(int nr, volatile void *addr) |
{ |
asm volatile("bts %1,%0" |
: ADDR |
: "Ir" (nr) : "memory"); |
} |
static inline void __clear_bit(int nr, volatile void *addr) |
{ |
asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); |
} |
/* Pure-C bit test, used when @nr is a compile-time constant so the
 * compiler can fold it.  Returns 1 if bit @nr is set, else 0. */
static inline int constant_test_bit(int nr, const volatile void *addr)
{
	const unsigned long *word = (const unsigned long *)addr;

	return ((word[nr / 32] >> (nr % 32)) & 1UL) != 0;
}
static inline int variable_test_bit(int nr, volatile const void *addr) |
{ |
int oldbit; |
asm volatile("bt %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit) |
: "m" (*(unsigned long *)addr), "Ir" (nr)); |
return oldbit; |
}; |
#define test_bit(nr,addr) \ |
(__builtin_constant_p(nr) ? \ |
constant_test_bit((nr),(addr)) : \ |
variable_test_bit((nr),(addr))) |
/* Find last (most significant) set bit: returns its 1-based position
 * (fls(1) == 1, fls(0x80000000) == 32) or 0 when @x is zero.  BSR
 * leaves the destination unchanged on zero input, hence the explicit
 * -1 fallback before the +1 adjustment. */
static inline int fls(int x)
{
	int msb;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (msb) : "rm" (x));
	return msb + 1;
}
static inline unsigned long __ffs(unsigned long word) |
{ |
__asm__("bsfl %1,%0" |
:"=r" (word) |
:"rm" (word)); |
return word; |
} |
/* Scan the bitmap at @addr (of @size bits) word by word and return the
 * index of the first set bit, or a value >= @size if none is set. */
static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
	unsigned base;

	for (base = 0; base < size; base += (sizeof(*addr) << 3)) {
		unsigned long w = *addr++;

		if (w)
			return base + __ffs(w);
	}
	return base;
}
/*
 * Find the first set bit at an index >= @offset in the bitmap at
 * @addr (of @size bits).  Returns the bit index, or a value >= @size
 * if no set bit remains.  Assumes 32-bit words (x86 BSF).
 */
int find_next_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit)
	{
		/*
		 * Look for nonzero in the first 32 bits:
		 * (BSF sets ZF on a zero source, in which case the
		 * sentinel 32 is loaded instead.)
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (*p >> bit));
		if (set < (32 - bit))
			return set + offset;
		/* nothing in the partial word; continue at the next one */
		set = 32 - bit;
		p++;
	}
	/*
	 * No set bit yet, search remaining full words for a bit
	 */
	res = find_first_bit (p, size - 32 * (p - addr));
	return (offset + set + res);
}
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
#define rcu_dereference(p) ({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
(_________p1); \ |
}) |
#define rcu_assign_pointer(p, v) \ |
({ \ |
if (!__builtin_constant_p(v) || \ |
((v) != NULL)) \ |
(p) = (v); \ |
}) |
//static struct kmem_cache *idr_layer_cache; |
static struct idr_layer *get_from_free_list(struct idr *idp) |
{ |
struct idr_layer *p; |
unsigned long flags; |
// spin_lock_irqsave(&idp->lock, flags); |
if ((p = idp->id_free)) { |
idp->id_free = p->ary[0]; |
idp->id_free_cnt--; |
p->ary[0] = NULL; |
} |
// spin_unlock_irqrestore(&idp->lock, flags); |
return(p); |
} |
/*
 * RCU callback that frees an idr_layer once readers are done with it.
 * NOTE(review): in this port free_layer() calls kfree() directly, so
 * this callback appears to be unused -- confirm before removing.
 */
static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kfree(layer);
}
/* Free a layer immediately.  The original defers this through RCU
 * (call_rcu + idr_layer_rcu_free); this port frees synchronously. */
static inline void free_layer(struct idr_layer *p)
{
	kfree(p);
}
/* only called when idp->lock is held */
/* Push @p onto the head of the idr's singly linked free list,
 * reusing ary[0] as the link pointer. */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}
/*
 * Return @p to the idr's free list.  In the original this takes
 * idp->lock around the list update; locking is ported out here, so
 * this is a plain wrapper around __move_to_free_list().
 *
 * Fix vs. original: the local 'flags' was unused (only referenced by
 * the commented-out spinlock calls), producing a compiler warning; the
 * dead local is removed.
 */
static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	/*
	 * Depends on the return element being zeroed.
	 */
//	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
//	spin_unlock_irqrestore(&idp->lock, flags);
}
/*
 * Mark the slot for @id as allocated in the leaf layer pa[0], then
 * propagate "full" status up the ancestor chain in @pa (as filled in
 * by sub_alloc()).
 */
static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;	/* ran past the root; pa[] is NULL-terminated */
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}
/** |
* idr_pre_get - reserver resources for idr allocation |
* @idp: idr handle |
* @gfp_mask: memory allocation flags |
* |
* This function should be called prior to locking and calling the |
* idr_get_new* functions. It preallocates enough memory to satisfy |
* the worst possible allocation. |
* |
* If the system is REALLY out of memory this function returns 0, |
* otherwise 1. |
*/ |
int idr_pre_get(struct idr *idp, u32_t gfp_mask) |
{ |
while (idp->id_free_cnt < IDR_FREE_MAX) { |
struct idr_layer *new; |
new = kzalloc(sizeof(new), gfp_mask); |
if (new == NULL) |
return (0); |
move_to_free_list(idp, new); |
} |
return 1; |
} |
EXPORT_SYMBOL(idr_pre_get); |
/*
 * Walk the radix tree looking for the first free slot with an id
 * >= *starting_id, creating missing intermediate layers from the
 * free list on the way down.  On success, returns the id and fills
 * @pa with the path of layers (pa[0] = leaf, NULL-terminated above
 * the root) for idr_mark_full().  Returns IDR_NEED_TO_GROW (with
 * *starting_id updated) when the tree must gain a level, -1 when the
 * preallocated free list is exhausted, or IDR_NOMORE_SPACE when the
 * id space is used up.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		/* index of the slot for 'id' within the current layer */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		/* search the *inverted* bitmap: set bits mean free slots */
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			/* round id up to the start of the next subtree */
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			/* jumped forward to a later free slot: clear the
			 * low-order id bits for the skipped subtree */
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}
/*
 * Find (or make room for) an empty slot with id >= @starting_id.
 * Grows the tree by adding levels at the top whenever @starting_id
 * does not fit in the current depth, then delegates the actual slot
 * search to sub_alloc(), retrying after each grow request.  Returns
 * the id on success, or a negative code (-1 on allocation failure,
 * IDR_NOMORE_SPACE when the id space is exhausted).
 *
 * NOTE(review): the 'flags' local is unused here -- it only serves the
 * commented-out spinlock calls kept from the original driver.
 */
static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	/* empty tree: fabricate a single-layer root */
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
//			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
//			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		/* new root adopts the old tree as its first child */
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}
/*
 * Common worker for idr_get_new() / idr_get_new_above(): allocate an
 * empty slot at id >= @starting_id, store @ptr in it, and mark the
 * slot (and any newly-full ancestors) as used.  Returns the id, or a
 * negative internal code on failure (mapped to an errno by callers).
 */
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}
/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * Allocate an id no smaller than @starting_id and bind @ptr to it.
 * Call with any required locks held.
 *
 * Returns -EAGAIN when preallocated memory ran out (unlock, call
 * idr_pre_get() and retry) or -ENOSPC when the idr is full; on
 * success returns 0 with the new id (range @starting_id ...
 * 0x7fffffff) stored through @id.
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int slot = idr_get_new_above_int(idp, ptr, starting_id);

	/*
	 * Cheap hack until the IDR code can be fixed to return proper
	 * error values: map the internal negative codes onto errnos.
	 */
	if (slot < 0)
		return _idr_rc_to_errno(slot);

	*id = slot;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;
	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);	/* maps to -EAGAIN / -ENOSPC */
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);
/* Diagnostic helper: warn when removal is attempted for an unallocated id. */
static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
//    dump_stack();
}
/*
 * sub_remove - clear the slot for @id and prune layers that become empty.
 * @shift: bit position of the top layer's index within @id, i.e.
 *         (idp->layers - 1) * IDR_BITS.
 *
 * Walks from the top layer down to the leaf, remembering the address of
 * the parent slot at each level in pa[] so empty layers can be unlinked
 * bottom-up afterwards.
 */
static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];	/* parent slot addresses, per level */
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;			/* sentinel: stops the unwind loop below */
	*++paa = &idp->top;

	/* Descend to the leaf, clearing the "subtree full" bit at each level. */
	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))){
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		/* Unlink and free layers whose count dropped to zero, bottom-up.
		 * Freeing lags one step behind unlinking so a layer is never
		 * freed while we still need its parent slot. */
		to_free = NULL;
		while(*paa && ! --((**paa)->count)){
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;	/* the whole tree is now empty */
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}
/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	/* Keep at most IDR_FREE_MAX preallocated layers cached. */
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kfree(p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);
/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree, will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];	/* stack of layers on the current path */
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	/* Detach the whole tree first; readers see an empty idr from here on. */
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	/* Post-order walk: descend to each leaf, then free layers on the
	 * way back up once every id below them has been passed. */
	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}
		id += 1 << n;
		/* fls(id) tells how many levels the id increment carried into;
		 * those levels are fully visited and can be freed. */
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Frees only the preallocated layers sitting on the per-idr free list;
 * layers still holding ids must be released first (see idr_remove_all()).
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kfree(p);
	}
}
EXPORT_SYMBOL(idr_destroy);
/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;	/* total index bits covered by this tree */

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;		/* id lies beyond the current tree's range */
	BUG_ON(n == 0);

	/* Consume IDR_BITS of the id per level while descending to the leaf. */
	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
#if 0 |
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];	/* layers on the current descent path */
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	/* In-order walk over all possible ids, invoking @fn on occupied leaves. */
	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}
		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}
		/* Advance past the subtree just visited and pop back up as far
		 * as the id increment carried (see fls). */
		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return error;
}
EXPORT_SYMBOL(idr_for_each);
/**
 * idr_get_next - lookup next object of id to given id.
 * @idp: idr handle
 * @nextidp: in: id to start searching at; out: id of the object found
 *
 * Returns pointer to the registered object with the lowest id greater
 * than or equal to *@nextidp, or %NULL if there is none.  On success,
 * *@nextidp is updated to the found id.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];	/* pa: layers on the descent path */
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;

	/* Same walk as idr_for_each(), but stop at the first occupied leaf. */
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}
		if (p) {
			*nextidp = id;
			return p;
		}
		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;	/* total index bits in this tree */

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);	/* id beyond the tree's range */

	/* Descend to the leaf layer holding the slot for @id. */
	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);	/* slot empty: id not allocated */

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
#endif |
/*
 * idr_init_cache - one-time allocator setup for idr layers.
 * Stubbed out in this port: the slab cache is not created, so layers
 * are obtained via plain allocation elsewhere in this file.
 */
void idr_init_cache(void)
{
	//idr_layer_cache = kmem_cache_create("idr_layer_cache",
	//			sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}
/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	/* spinlock initialization is disabled in this port */
 //   spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
#if 0 |
/* |
* IDA - IDR based ID allocator |
* |
* this is id allocator without id -> pointer translation. Memory |
* usage is much lower than full blown idr because each id only |
* occupies a bit. ida uses a custom leaf node which contains |
* IDA_BITMAP_BITS slots. |
* |
* 2007-04-25 written by Tejun Heo <htejun@gmail.com> |
*/ |
/*
 * free_bitmap - return @bitmap to the ida's single-entry cache or free it.
 *
 * If the cache slot (ida->free_bitmap) is empty, stash the bitmap there
 * for reuse by the next allocation; the slot is re-checked under the idr
 * lock to avoid racing with a concurrent stasher.  Otherwise the bitmap
 * is simply freed (kfree(NULL) is a no-op when it was cached).
 */
static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}
/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap (cached in the ida's single-entry slot) */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	/* each idr slot holds one bitmap covering IDA_BITMAP_BITS ids */
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;	/* landed in a later chunk: search it from bit 0 */
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		/* take the bitmap preallocated by ida_pre_get(), if any */
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;	/* caller must ida_pre_get() and retry */

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
/**
 * ida_get_new - allocate new ID
 * @ida: idr handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
/** |
* ida_remove - remove the given ID |
* @ida: ida handle |
* @id: ID to free |
*/ |
void ida_remove(struct ida *ida, int id) |
{ |
struct idr_layer *p = ida->idr.top; |
int shift = (ida->idr.layers - 1) * IDR_BITS; |
int idr_id = id / IDA_BITMAP_BITS; |
int offset = id % IDA_BITMAP_BITS; |
int n; |
struct ida_bitmap *bitmap; |
/* clear full bits while looking up the leaf idr_layer */ |
while ((shift > 0) && p) { |
n = (idr_id >> shift) & IDR_MASK; |
__clear_bit(n, &p->bitmap); |
p = p->ary[n]; |
shift -= IDR_BITS; |
} |
if (p == NULL) |
goto err; |
n = idr_id & IDR_MASK; |
__clear_bit(n, &p->bitmap); |
bitmap = (void *)p->ary[n]; |
if (!test_bit(offset, bitmap->bitmap)) |
goto err; |
/* update bitmap and remove it if empty */ |
__clear_bit(offset, bitmap->bitmap); |
if (--bitmap->nr_busy == 0) { |
__set_bit(n, &p->bitmap); /* to please idr_remove() */ |
idr_remove(&ida->idr, idr_id); |
free_bitmap(ida, bitmap); |
} |
return; |
err: |
printk(KERN_WARNING |
"ida_remove called for id=%d which is not allocated.\n", id); |
} |
EXPORT_SYMBOL(ida_remove); |
/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 *
 * Frees the embedded idr's cached layers and the ida's cached bitmap
 * (kfree(NULL) is safe when no bitmap is cached).
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);
/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
#endif |
/drivers/video/drm/include/drmP.h |
---|
67,6 → 67,9 |
__func__, ##args); \ |
} while (0) |
#define DRM_DEBUG(fmt, arg...) \ |
printk("[" DRM_NAME ":%s] " fmt , __func__ , ##arg) |
#if 0 |
/***********************************************************************/ |
/drivers/video/drm/include/drm_crtc.h |
---|
25,10 → 25,10 |
#ifndef __DRM_CRTC_H__ |
#define __DRM_CRTC_H__ |
//#include <linux/i2c.h> |
#include <linux/i2c.h> |
//#include <linux/spinlock.h> |
//#include <linux/types.h> |
//#include <linux/idr.h> |
#include <linux/idr.h> |
//#include <linux/fb.h> |
/drivers/video/drm/include/drm_edid.h |
---|
193,97 → 193,6 |
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) |
#define KOBJ_NAME_LEN 20 |
#define I2C_NAME_SIZE 20 |
/* --- Defines for bit-adapters --------------------------------------- */ |
/* |
* This struct contains the hw-dependent functions of bit-style adapters to |
* manipulate the line states, and to init any hw-specific features. This is |
* only used if you have more than one hw-type of adapter running. |
*/ |
struct i2c_algo_bit_data { |
void *data; /* private data for lowlevel routines */ |
void (*setsda) (void *data, int state); |
void (*setscl) (void *data, int state); |
int (*getsda) (void *data); |
int (*getscl) (void *data); |
/* local settings */ |
int udelay; /* half clock cycle time in us, |
minimum 2 us for fast-mode I2C, |
minimum 5 us for standard-mode I2C and SMBus, |
maximum 50 us for SMBus */ |
int timeout; /* in jiffies */ |
}; |
struct i2c_client; |
/* |
* i2c_adapter is the structure used to identify a physical i2c bus along |
* with the access algorithms necessary to access it. |
*/ |
struct i2c_adapter { |
// struct module *owner; |
unsigned int id; |
unsigned int class; |
// const struct i2c_algorithm *algo; /* the algorithm to access the bus */ |
void *algo_data; |
/* --- administration stuff. */ |
int (*client_register)(struct i2c_client *); |
int (*client_unregister)(struct i2c_client *); |
/* data fields that are valid for all devices */ |
u8 level; /* nesting level for lockdep */ |
// struct mutex bus_lock; |
// struct mutex clist_lock; |
int timeout; |
int retries; |
// struct device dev; /* the adapter device */ |
int nr; |
struct list_head clients; /* DEPRECATED */ |
char name[48]; |
// struct completion dev_released; |
}; |
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) |
struct i2c_client { |
unsigned short flags; /* div., see below */ |
unsigned short addr; /* chip address - NOTE: 7bit */ |
/* addresses are stored in the */ |
/* _LOWER_ 7 bits */ |
char name[I2C_NAME_SIZE]; |
struct i2c_adapter *adapter; /* the adapter we sit on */ |
// struct i2c_driver *driver; /* and our access routines */ |
// struct device dev; /* the device structure */ |
int irq; /* irq issued by device (or -1) */ |
char driver_name[KOBJ_NAME_LEN]; |
struct list_head list; /* DEPRECATED */ |
// struct completion released; |
}; |
#define to_i2c_client(d) container_of(d, struct i2c_client, dev) |
int i2c_bit_add_bus(struct i2c_adapter *); |
int i2c_bit_add_numbered_bus(struct i2c_adapter *); |
struct i2c_msg { |
u16 addr; /* slave address */ |
u16 flags; |
#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */ |
#define I2C_M_RD 0x0001 /* read data, from slave to master */ |
#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */ |
u16 len; /* msg length */ |
u8 *buf; /* pointer to msg data */ |
}; |
#endif /* __DRM_EDID_H__ */ |
/drivers/video/drm/include/errno.h |
---|
0,0 → 1,111 |
#ifndef _ASM_GENERIC_ERRNO_H |
#define _ASM_GENERIC_ERRNO_H |
#include <errno-base.h> |
#define EDEADLK 35 /* Resource deadlock would occur */ |
#define ENAMETOOLONG 36 /* File name too long */ |
#define ENOLCK 37 /* No record locks available */ |
#define ENOSYS 38 /* Function not implemented */ |
#define ENOTEMPTY 39 /* Directory not empty */ |
#define ELOOP 40 /* Too many symbolic links encountered */ |
#define EWOULDBLOCK EAGAIN /* Operation would block */ |
#define ENOMSG 42 /* No message of desired type */ |
#define EIDRM 43 /* Identifier removed */ |
#define ECHRNG 44 /* Channel number out of range */ |
#define EL2NSYNC 45 /* Level 2 not synchronized */ |
#define EL3HLT 46 /* Level 3 halted */ |
#define EL3RST 47 /* Level 3 reset */ |
#define ELNRNG 48 /* Link number out of range */ |
#define EUNATCH 49 /* Protocol driver not attached */ |
#define ENOCSI 50 /* No CSI structure available */ |
#define EL2HLT 51 /* Level 2 halted */ |
#define EBADE 52 /* Invalid exchange */ |
#define EBADR 53 /* Invalid request descriptor */ |
#define EXFULL 54 /* Exchange full */ |
#define ENOANO 55 /* No anode */ |
#define EBADRQC 56 /* Invalid request code */ |
#define EBADSLT 57 /* Invalid slot */ |
#define EDEADLOCK EDEADLK |
#define EBFONT 59 /* Bad font file format */ |
#define ENOSTR 60 /* Device not a stream */ |
#define ENODATA 61 /* No data available */ |
#define ETIME 62 /* Timer expired */ |
#define ENOSR 63 /* Out of streams resources */ |
#define ENONET 64 /* Machine is not on the network */ |
#define ENOPKG 65 /* Package not installed */ |
#define EREMOTE 66 /* Object is remote */ |
#define ENOLINK 67 /* Link has been severed */ |
#define EADV 68 /* Advertise error */ |
#define ESRMNT 69 /* Srmount error */ |
#define ECOMM 70 /* Communication error on send */ |
#define EPROTO 71 /* Protocol error */ |
#define EMULTIHOP 72 /* Multihop attempted */ |
#define EDOTDOT 73 /* RFS specific error */ |
#define EBADMSG 74 /* Not a data message */ |
#define EOVERFLOW 75 /* Value too large for defined data type */ |
#define ENOTUNIQ 76 /* Name not unique on network */ |
#define EBADFD 77 /* File descriptor in bad state */ |
#define EREMCHG 78 /* Remote address changed */ |
#define ELIBACC 79 /* Can not access a needed shared library */ |
#define ELIBBAD 80 /* Accessing a corrupted shared library */ |
#define ELIBSCN 81 /* .lib section in a.out corrupted */ |
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ |
#define ELIBEXEC 83 /* Cannot exec a shared library directly */ |
#define EILSEQ 84 /* Illegal byte sequence */ |
#define ERESTART 85 /* Interrupted system call should be restarted */ |
#define ESTRPIPE 86 /* Streams pipe error */ |
#define EUSERS 87 /* Too many users */ |
#define ENOTSOCK 88 /* Socket operation on non-socket */ |
#define EDESTADDRREQ 89 /* Destination address required */ |
#define EMSGSIZE 90 /* Message too long */ |
#define EPROTOTYPE 91 /* Protocol wrong type for socket */ |
#define ENOPROTOOPT 92 /* Protocol not available */ |
#define EPROTONOSUPPORT 93 /* Protocol not supported */ |
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ |
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ |
#define EPFNOSUPPORT 96 /* Protocol family not supported */ |
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ |
#define EADDRINUSE 98 /* Address already in use */ |
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ |
#define ENETDOWN 100 /* Network is down */ |
#define ENETUNREACH 101 /* Network is unreachable */ |
#define ENETRESET 102 /* Network dropped connection because of reset */ |
#define ECONNABORTED 103 /* Software caused connection abort */ |
#define ECONNRESET 104 /* Connection reset by peer */ |
#define ENOBUFS 105 /* No buffer space available */ |
#define EISCONN 106 /* Transport endpoint is already connected */ |
#define ENOTCONN 107 /* Transport endpoint is not connected */ |
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ |
#define ETOOMANYREFS 109 /* Too many references: cannot splice */ |
#define ETIMEDOUT 110 /* Connection timed out */ |
#define ECONNREFUSED 111 /* Connection refused */ |
#define EHOSTDOWN 112 /* Host is down */ |
#define EHOSTUNREACH 113 /* No route to host */ |
#define EALREADY 114 /* Operation already in progress */ |
#define EINPROGRESS 115 /* Operation now in progress */ |
#define ESTALE 116 /* Stale NFS file handle */ |
#define EUCLEAN 117 /* Structure needs cleaning */ |
#define ENOTNAM 118 /* Not a XENIX named type file */ |
#define ENAVAIL 119 /* No XENIX semaphores available */ |
#define EISNAM 120 /* Is a named type file */ |
#define EREMOTEIO 121 /* Remote I/O error */ |
#define EDQUOT 122 /* Quota exceeded */ |
#define ENOMEDIUM 123 /* No medium found */ |
#define EMEDIUMTYPE 124 /* Wrong medium type */ |
#define ECANCELED 125 /* Operation Canceled */ |
#define ENOKEY 126 /* Required key not available */ |
#define EKEYEXPIRED 127 /* Key has expired */ |
#define EKEYREVOKED 128 /* Key has been revoked */ |
#define EKEYREJECTED 129 /* Key was rejected by service */ |
/* for robust mutexes */ |
#define EOWNERDEAD 130 /* Owner died */ |
#define ENOTRECOVERABLE 131 /* State not recoverable */ |
#define ERFKILL 132 /* Operation not possible due to RF-kill */ |
#endif |
/drivers/video/drm/include/linux/bitops.h |
---|
0,0 → 1,191 |
#ifndef _LINUX_BITOPS_H |
#define _LINUX_BITOPS_H |
#define BIT(nr) (1UL << (nr)) |
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) |
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) |
#define BITS_PER_BYTE 8 |
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) |
/* |
* Include this here because some architectures need generic_ffs/fls in |
* scope |
*/ |
#include <asm/bitops.h> |
#define for_each_bit(bit, addr, size) \ |
for ((bit) = find_first_bit((addr), (size)); \ |
(bit) < (size); \ |
(bit) = find_next_bit((addr), (size), (bit) + 1)) |
static __inline__ int get_bitmask_order(unsigned int count) |
{ |
int order; |
order = fls(count); |
return order; /* We could be slightly more clever with -1 here... */ |
} |
static __inline__ int get_count_order(unsigned int count) |
{ |
int order; |
order = fls(count) - 1; |
if (count & (count - 1)) |
order++; |
return order; |
} |
static inline unsigned long hweight_long(unsigned long w) |
{ |
return sizeof(w) == 4 ? hweight32(w) : hweight64(w); |
} |
/** |
* rol32 - rotate a 32-bit value left |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u32 rol32(__u32 word, unsigned int shift) |
{ |
return (word << shift) | (word >> (32 - shift)); |
} |
/** |
* ror32 - rotate a 32-bit value right |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u32 ror32(__u32 word, unsigned int shift) |
{ |
return (word >> shift) | (word << (32 - shift)); |
} |
/** |
* rol16 - rotate a 16-bit value left |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u16 rol16(__u16 word, unsigned int shift) |
{ |
return (word << shift) | (word >> (16 - shift)); |
} |
/** |
* ror16 - rotate a 16-bit value right |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u16 ror16(__u16 word, unsigned int shift) |
{ |
return (word >> shift) | (word << (16 - shift)); |
} |
/** |
* rol8 - rotate an 8-bit value left |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u8 rol8(__u8 word, unsigned int shift) |
{ |
return (word << shift) | (word >> (8 - shift)); |
} |
/** |
* ror8 - rotate an 8-bit value right |
* @word: value to rotate |
* @shift: bits to roll |
*/ |
static inline __u8 ror8(__u8 word, unsigned int shift) |
{ |
return (word >> shift) | (word << (8 - shift)); |
} |
static inline unsigned fls_long(unsigned long l) |
{ |
if (sizeof(l) == 4) |
return fls(l); |
return fls64(l); |
} |
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	/* low word empty: first set bit is in the high 32 bits */
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
#ifdef __KERNEL__ |
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT |
/** |
* find_first_bit - find the first set bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum size to search |
* |
* Returns the bit number of the first set bit. |
*/ |
extern unsigned long find_first_bit(const unsigned long *addr, |
unsigned long size); |
/** |
* find_first_zero_bit - find the first cleared bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum size to search |
* |
* Returns the bit number of the first cleared bit. |
*/ |
extern unsigned long find_first_zero_bit(const unsigned long *addr, |
unsigned long size); |
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ |
#ifdef CONFIG_GENERIC_FIND_LAST_BIT |
/** |
* find_last_bit - find the last set bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum size to search |
* |
* Returns the bit number of the first set bit, or size. |
*/ |
extern unsigned long find_last_bit(const unsigned long *addr, |
unsigned long size); |
#endif /* CONFIG_GENERIC_FIND_LAST_BIT */ |
#ifdef CONFIG_GENERIC_FIND_NEXT_BIT |
/** |
* find_next_bit - find the next set bit in a memory region |
* @addr: The address to base the search on |
* @offset: The bitnumber to start searching at |
* @size: The bitmap size in bits |
*/ |
extern unsigned long find_next_bit(const unsigned long *addr, |
unsigned long size, unsigned long offset); |
/** |
* find_next_zero_bit - find the next cleared bit in a memory region |
* @addr: The address to base the search on |
* @offset: The bitnumber to start searching at |
* @size: The bitmap size in bits |
*/ |
extern unsigned long find_next_zero_bit(const unsigned long *addr, |
unsigned long size, |
unsigned long offset); |
#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ |
#endif /* __KERNEL__ */ |
#endif |
/drivers/video/drm/include/linux/i2c-algo-bit.h |
---|
0,0 → 1,51 |
/* ------------------------------------------------------------------------- */ |
/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */ |
/* ------------------------------------------------------------------------- */ |
/* Copyright (C) 1995-99 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
/* ------------------------------------------------------------------------- */ |
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
Frodo Looijaard <frodol@dds.nl> */ |
#ifndef _LINUX_I2C_ALGO_BIT_H |
#define _LINUX_I2C_ALGO_BIT_H |
/* --- Defines for bit-adapters --------------------------------------- */ |
/* |
* This struct contains the hw-dependent functions of bit-style adapters to |
* manipulate the line states, and to init any hw-specific features. This is |
* only used if you have more than one hw-type of adapter running. |
*/ |
struct i2c_algo_bit_data { |
void *data; /* private data for lowlevel routines */ |
void (*setsda) (void *data, int state); |
void (*setscl) (void *data, int state); |
int (*getsda) (void *data); |
int (*getscl) (void *data); |
/* local settings */ |
int udelay; /* half clock cycle time in us, |
minimum 2 us for fast-mode I2C, |
minimum 5 us for standard-mode I2C and SMBus, |
maximum 50 us for SMBus */ |
int timeout; /* in jiffies */ |
}; |
int i2c_bit_add_bus(struct i2c_adapter *); |
int i2c_bit_add_numbered_bus(struct i2c_adapter *); |
#endif /* _LINUX_I2C_ALGO_BIT_H */ |
/drivers/video/drm/include/linux/i2c.h |
---|
0,0 → 1,299 |
/* ------------------------------------------------------------------------- */ |
/* */ |
/* i2c.h - definitions for the i2c-bus interface */ |
/* */ |
/* ------------------------------------------------------------------------- */ |
/* Copyright (C) 1995-2000 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
/* ------------------------------------------------------------------------- */ |
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
Frodo Looijaard <frodol@dds.nl> */ |
#ifndef _LINUX_I2C_H |
#define _LINUX_I2C_H |
#include <types.h> |
#define I2C_NAME_SIZE 20 |
struct i2c_msg; |
struct i2c_algorithm; |
struct i2c_adapter; |
struct i2c_client; |
union i2c_smbus_data; |
/* Transfer num messages. |
*/ |
extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, |
int num); |
/** |
* struct i2c_client - represent an I2C slave device |
* @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address; |
* I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking |
* @addr: Address used on the I2C bus connected to the parent adapter. |
* @name: Indicates the type of the device, usually a chip name that's |
* generic enough to hide second-sourcing and compatible revisions. |
* @adapter: manages the bus segment hosting this I2C device |
* @driver: device's driver, hence pointer to access routines |
* @dev: Driver model device node for the slave. |
* @irq: indicates the IRQ generated by this device (if any) |
* @detected: member of an i2c_driver.clients list or i2c-core's |
* userspace_devices list |
* |
* An i2c_client identifies a single device (i.e. chip) connected to an |
* i2c bus. The behaviour exposed to Linux is defined by the driver |
* managing the device. |
*/ |
struct i2c_client {
	unsigned short flags;		/* div., see below */
	unsigned short addr;		/* chip address - NOTE: 7bit */
					/* addresses are stored in the */
					/* _LOWER_ 7 bits */
	char name[I2C_NAME_SIZE];	/* chip type name (see kernel-doc above) */
	struct i2c_adapter *adapter;	/* the adapter we sit on */
//	struct i2c_driver *driver;	/* and our access routines */
//	struct device dev;		/* the device structure */
	int irq;			/* irq issued by device (or -1) */
	struct list_head detected;	/* list node; list owner not visible here —
					 * kernel-doc above says a driver's clients
					 * list or i2c-core's userspace list */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev) |
/* |
* The following structs are for those who like to implement new bus drivers: |
* i2c_algorithm is the interface to a class of hardware solutions which can |
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 |
* to name two of the most common. |
*/ |
struct i2c_algorithm { |
/* If an adapter algorithm can't do I2C-level access, set master_xfer |
to NULL. If an adapter algorithm can do SMBus access, set |
smbus_xfer. If set to NULL, the SMBus protocol is simulated |
using common I2C messages */ |
/* master_xfer should return the number of messages successfully |
processed, or a negative value on error */ |
int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, |
int num); |
int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, |
unsigned short flags, char read_write, |
u8 command, int size, union i2c_smbus_data *data); |
/* To determine what the adapter supports */ |
u32 (*functionality) (struct i2c_adapter *); |
}; |
/* |
* i2c_adapter is the structure used to identify a physical i2c bus along |
* with the access algorithms necessary to access it. |
*/ |
struct i2c_adapter {
	unsigned int id;
	unsigned int class;		  /* classes to allow probing for */
	const struct i2c_algorithm *algo; /* the algorithm to access the bus */
	void *algo_data;		  /* private data for @algo's routines */

	/* data fields that are valid for all devices */
	u8 level;			/* nesting level for lockdep */
	int timeout;			/* in jiffies */
	int retries;			/* retry policy; consumer is in i2c-core,
					 * not visible here */
//	struct device dev;		/* the adapter device */

	int nr;				/* presumably the bus number — confirm
					 * against i2c_bit_add_numbered_bus() */
	char name[48];
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) |
/*flags for the client struct: */ |
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ |
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ |
/* Must equal I2C_M_TEN below */ |
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ |
/* i2c adapter classes (bitmask) */ |
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ |
#define I2C_CLASS_TV_ANALOG (1<<1) /* bttv + friends */ |
#define I2C_CLASS_TV_DIGITAL (1<<2) /* dvb cards */ |
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ |
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ |
/* i2c_client_address_data is the struct for holding default client |
* addresses for a driver and for the parameters supplied on the |
* command line |
*/ |
struct i2c_client_address_data { |
const unsigned short *normal_i2c; |
const unsigned short *probe; |
const unsigned short *ignore; |
const unsigned short * const *forces; |
}; |
/* Internal numbers to terminate lists */ |
#define I2C_CLIENT_END 0xfffeU |
/* The numbers to use to set I2C bus address */ |
#define ANY_I2C_BUS 0xffff |
/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ |
#define I2C_ADDRS(addr, addrs...) \ |
((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) |
/** |
* struct i2c_msg - an I2C transaction segment beginning with START |
* @addr: Slave address, either seven or ten bits. When this is a ten |
* bit address, I2C_M_TEN must be set in @flags and the adapter |
* must support I2C_FUNC_10BIT_ADDR. |
* @flags: I2C_M_RD is handled by all adapters. No other flags may be |
* provided unless the adapter exported the relevant I2C_FUNC_* |
* flags through i2c_check_functionality(). |
* @len: Number of data bytes in @buf being read from or written to the |
* I2C slave address. For read transactions where I2C_M_RECV_LEN |
* is set, the caller guarantees that this buffer can hold up to |
* 32 bytes in addition to the initial length byte sent by the |
* slave (plus, if used, the SMBus PEC); and this value will be |
* incremented by the number of block data bytes received. |
* @buf: The buffer into which data is read, or from which it's written. |
* |
* An i2c_msg is the low level representation of one segment of an I2C |
* transaction. It is visible to drivers in the @i2c_transfer() procedure, |
* to userspace from i2c-dev, and to I2C adapter drivers through the |
* @i2c_adapter.@master_xfer() method. |
* |
* Except when I2C "protocol mangling" is used, all I2C adapters implement |
* the standard rules for I2C transactions. Each transaction begins with a |
* START. That is followed by the slave address, and a bit encoding read |
* versus write. Then follow all the data bytes, possibly including a byte |
* with SMBus PEC. The transfer terminates with a NAK, or when all those |
* bytes have been transferred and ACKed. If this is the last message in a |
* group, it is followed by a STOP. Otherwise it is followed by the next |
* @i2c_msg transaction segment, beginning with a (repeated) START. |
* |
* Alternatively, when the adapter supports I2C_FUNC_PROTOCOL_MANGLING then |
* passing certain @flags may have changed those standard protocol behaviors. |
* Those flags are only for use with broken/nonconforming slaves, and with |
* adapters which are known to support the specific mangling options they |
* need (one or more of IGNORE_NAK, NO_RD_ACK, NOSTART, and REV_DIR_ADDR). |
*/ |
struct i2c_msg { |
u16 addr; /* slave address */ |
u16 flags; |
#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */ |
#define I2C_M_RD 0x0001 /* read data, from slave to master */ |
#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */ |
#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */ |
u16 len; /* msg length */ |
u8 *buf; /* pointer to msg data */ |
}; |
/* To determine what functionality is present */ |
#define I2C_FUNC_I2C 0x00000001 |
#define I2C_FUNC_10BIT_ADDR 0x00000002 |
#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_NOSTART etc. */ |
#define I2C_FUNC_SMBUS_PEC 0x00000008 |
#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */ |
#define I2C_FUNC_SMBUS_QUICK 0x00010000 |
#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000 |
#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000 |
#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000 |
#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000 |
#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000 |
#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000 |
#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000 |
#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 |
#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000 |
#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */ |
#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */ |
#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \ |
I2C_FUNC_SMBUS_WRITE_BYTE) |
#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \ |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA) |
#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \ |
I2C_FUNC_SMBUS_WRITE_WORD_DATA) |
#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \ |
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) |
#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \ |
I2C_FUNC_SMBUS_WRITE_I2C_BLOCK) |
#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \ |
I2C_FUNC_SMBUS_BYTE | \ |
I2C_FUNC_SMBUS_BYTE_DATA | \ |
I2C_FUNC_SMBUS_WORD_DATA | \ |
I2C_FUNC_SMBUS_PROC_CALL | \ |
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \ |
I2C_FUNC_SMBUS_I2C_BLOCK | \ |
I2C_FUNC_SMBUS_PEC) |
/* |
* Data for SMBus Messages |
*/ |
#define I2C_SMBUS_BLOCK_MAX 32 /* As specified in SMBus standard */ |
union i2c_smbus_data { |
__u8 byte; |
__u16 word; |
__u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */ |
/* and one more for user-space compatibility */ |
}; |
/* i2c_smbus_xfer read or write markers */ |
#define I2C_SMBUS_READ 1 |
#define I2C_SMBUS_WRITE 0 |
/* SMBus transaction types (size parameter in the above functions) |
Note: these no longer correspond to the (arbitrary) PIIX4 internal codes! */ |
#define I2C_SMBUS_QUICK 0 |
#define I2C_SMBUS_BYTE 1 |
#define I2C_SMBUS_BYTE_DATA 2 |
#define I2C_SMBUS_WORD_DATA 3 |
#define I2C_SMBUS_PROC_CALL 4 |
#define I2C_SMBUS_BLOCK_DATA 5 |
#define I2C_SMBUS_I2C_BLOCK_BROKEN 6 |
#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ |
#define I2C_SMBUS_I2C_BLOCK_DATA 8 |
#endif /* _LINUX_I2C_H */ |
/drivers/video/drm/include/linux/idr.h |
---|
0,0 → 1,144 |
/* |
* include/linux/idr.h |
* |
* 2002-10-18 written by Jim Houston jim.houston@ccur.com |
* Copyright (C) 2002 by Concurrent Computer Corporation |
* Distributed under the GNU GPL license version 2. |
* |
* Small id to pointer translation service avoiding fixed sized |
* tables. |
*/ |
#ifndef __IDR_H__ |
#define __IDR_H__ |
#include <types.h> |
#include <errno-base.h> |
//#include <linux/bitops.h> |
//#include <linux/init.h> |
//#include <linux/rcupdate.h> |
struct rcu_head { |
struct rcu_head *next; |
void (*func)(struct rcu_head *head); |
}; |
# define IDR_BITS 5 |
# define IDR_FULL 0xfffffffful |
/* We can only use two of the bits in the top level because there is |
only one possible bit in the top level (5 bits * 7 levels = 35 |
bits, but you only use 31 bits in the id). */ |
# define TOP_LEVEL_FULL (IDR_FULL >> 30) |
#define IDR_SIZE (1 << IDR_BITS) |
#define IDR_MASK ((1 << IDR_BITS)-1) |
#define MAX_ID_SHIFT (sizeof(int)*8 - 1) |
#define MAX_ID_BIT (1U << MAX_ID_SHIFT) |
#define MAX_ID_MASK (MAX_ID_BIT - 1) |
/* Leave the possibility of an incomplete final layer */ |
#define MAX_LEVEL (MAX_ID_SHIFT + IDR_BITS - 1) / IDR_BITS |
/* Number of id_layer structs to leave in free list */ |
#define IDR_FREE_MAX MAX_LEVEL + MAX_LEVEL |
/* One radix-tree node: IDR_SIZE (1<<IDR_BITS) child slots per layer. */
struct idr_layer {
	unsigned long bitmap;		/* A zero bit means "space here" */
	struct idr_layer *ary[1<<IDR_BITS];	/* child slots (IDR_SIZE of them) */
	int count;			/* When zero, we can release it */
	int layer;			/* distance from leaf */
	struct rcu_head rcu_head;	/* for deferred freeing (see rcu_head above) */
};

/* Handle for one id→pointer map; initialize with idr_init() or IDR_INIT. */
struct idr {
	struct idr_layer *top;		/* root of the layer tree */
	struct idr_layer *id_free;	/* free list of spare layers */
	int layers;			/* only valid without concurrent changes */
	int id_free_cnt;		/* number of layers on the id_free list */
//	spinlock_t lock;
};
#define IDR_INIT(name) \ |
{ \ |
.top = NULL, \ |
.id_free = NULL, \ |
.layers = 0, \ |
.id_free_cnt = 0, \ |
// .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ |
} |
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) |
/* Actions to be taken after a call to _idr_sub_alloc */ |
#define IDR_NEED_TO_GROW -2 |
#define IDR_NOMORE_SPACE -3 |
#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC) |
/** |
* idr synchronization (stolen from radix-tree.h) |
* |
* idr_find() is able to be called locklessly, using RCU. The caller must |
* ensure calls to this function are made within rcu_read_lock() regions. |
* Other readers (lock-free or otherwise) and modifications may be running |
* concurrently. |
* |
* It is still required that the caller manage the synchronization and |
* lifetimes of the items. So if RCU lock-free lookups are used, typically |
* this would mean that the items have their own locks, or are amenable to |
* lock-free access; and that the items are freed by RCU (or only freed after |
* having been deleted from the idr tree *and* a synchronize_rcu() grace |
* period). |
*/ |
/* |
* This is what we export. |
*/ |
void *idr_find(struct idr *idp, int id); |
int idr_pre_get(struct idr *idp, u32_t gfp_mask); |
int idr_get_new(struct idr *idp, void *ptr, int *id); |
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
int idr_for_each(struct idr *idp, |
int (*fn)(int id, void *p, void *data), void *data); |
void *idr_get_next(struct idr *idp, int *nextid); |
void *idr_replace(struct idr *idp, void *ptr, int id); |
void idr_remove(struct idr *idp, int id); |
void idr_remove_all(struct idr *idp); |
void idr_destroy(struct idr *idp); |
void idr_init(struct idr *idp); |
/* |
* IDA - IDR based id allocator, use when translation from id to |
* pointer isn't necessary. |
*/ |
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ |
#define IDA_BITMAP_LONGS (128 / sizeof(long) - 1) |
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) |
struct ida_bitmap { |
long nr_busy; |
unsigned long bitmap[IDA_BITMAP_LONGS]; |
}; |
struct ida { |
struct idr idr; |
struct ida_bitmap *free_bitmap; |
}; |
#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, } |
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) |
int ida_pre_get(struct ida *ida, u32_t gfp_mask); |
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); |
int ida_get_new(struct ida *ida, int *p_id); |
void ida_remove(struct ida *ida, int id); |
void ida_destroy(struct ida *ida); |
void ida_init(struct ida *ida); |
void idr_init_cache(void); |
#endif /* __IDR_H__ */ |
/drivers/video/drm/include/types.h |
---|
86,9 → 86,6 |
#define DRM_INFO(fmt, arg...) dbgprintf("DRM: "fmt , ##arg)

/* NOTE(review): DRM_DEBUG expands identically to DRM_ERROR below, so
 * debug output is emitted at KERN_ERR and tagged "*ERROR*". This looks
 * like a copy-paste of the DRM_ERROR body — confirm it is intentional. */
#define DRM_DEBUG(fmt, arg...) \
	printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)

#define DRM_ERROR(fmt, arg...) \
	printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
230,24 → 227,6 |
#define EXPORT_SYMBOL(x) |
#define IDR_BITS 5 |
#define IDR_FULL 0xfffffffful |
struct idr_layer { |
unsigned long bitmap; /* A zero bit means "space here" */ |
struct idr_layer *ary[1<<IDR_BITS]; |
int count; /* When zero, we can release it */ |
}; |
struct idr { |
struct idr_layer *top; |
struct idr_layer *id_free; |
int layers; |
int id_free_cnt; |
// spinlock_t lock; |
}; |
#define min(x,y) ({ \ |
typeof(x) _x = (x); \ |
typeof(y) _y = (y); \ |
292,7 → 271,10 |
{ |
if (n != 0 && size > ULONG_MAX / n) |
return NULL; |
return kmalloc(n * size, 0); |
return kzalloc(n * size, 0); |
} |
#define ENTRY() dbgprintf("entry %s\n",__FUNCTION__) |
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__) |
#endif //__TYPES_H__ |
/drivers/video/drm/radeon/atikms.lds |
---|
0,0 → 1,55 |
/* Linker script for the atikms.dll KMS driver: PE (pei-i386) image,
 * entry point _drvEntry, sections aligned to __section_alignment__. */
OUTPUT_FORMAT(pei-i386)

ENTRY("_drvEntry")

SECTIONS
{
    . = SIZEOF_HEADERS;
    . = ALIGN(__section_alignment__);

    /* code and read-only data, placed at the image base */
    .text __image_base__ + ( __section_alignment__ < 0x1000 ? . : __section_alignment__ ) :
    {
        *(.text) *(.rdata)
    }

    .data ALIGN(__section_alignment__) :
    {
        *(.data)
    }

    /* base relocations, kept because the image is built with --image-base 0 */
    .reloc ALIGN(__section_alignment__) :
    {
        *(.reloc)
    }

    /* PE import tables, assembled from the sorted .idata$* pieces */
    .idata ALIGN(__section_alignment__):
    {
        SORT(*)(.idata$2)
        SORT(*)(.idata$3)
        /* These zeroes mark the end of the import list. */
        LONG (0); LONG (0); LONG (0); LONG (0); LONG (0);
        SORT(*)(.idata$4)
        SORT(*)(.idata$5)
        SORT(*)(.idata$6)
        SORT(*)(.idata$7)
    }

    .bss ALIGN(__section_alignment__):
    {
        *(.bss)
        *(COMMON)
    }

    /* strip debug, directive and export residue from the final image */
    /DISCARD/ :
    {
        *(.debug$S)
        *(.debug$T)
        *(.debug$F)
        *(.drectve)
        *(.edata)
    }
}
/drivers/video/drm/radeon/atombios_crtc.c |
---|
549,6 → 549,9 |
radeon_crtc->crtc_offset = |
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; |
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); |
dbgprintf("done %s\n",__FUNCTION__); |
} |
void radeon_init_disp_bw_avivo(struct drm_device *dev, |
/drivers/video/drm/radeon/makefile |
---|
0,0 → 1,78 |
CC = gcc |
FASM = e:/fasm/fasm.exe |
CFLAGS = -c -O2 -fomit-frame-pointer -fno-builtin-printf |
LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0 --file-alignment 512 --section-alignment 4096 |
DRM_TOPDIR = $(CURDIR)/.. |
DRM_INCLUDES = $(DRM_TOPDIR)/include |
LIBPATH:= . |
LIBS:= -ldrv -lcore |
NAME:= atikms |
INCLUDES = -I $(DRM_INCLUDES) -I $(DRM_INCLUDES)/ttm |
HFILES:= $(DRM_INCLUDES)/types.h \ |
$(DRM_INCLUDES)/list.h \ |
$(DRM_INCLUDES)/pci.h \ |
$(DRM_INCLUDES)/drm.h \ |
$(DRM_INCLUDES)/drmP.h \ |
$(DRM_INCLUDES)/drm_edid.h \ |
$(DRM_INCLUDES)/drm_crtc.h \ |
$(DRM_INCLUDES)/drm_mode.h \ |
$(DRM_INCLUDES)/drm_mm.h \ |
atom.h \ |
radeon.h \ |
radeon_asic.h |
NAME_SRC= \ |
pci.c \ |
$(DRM_TOPDIR)/drm_mm.c \ |
$(DRM_TOPDIR)/drm_edid.c \ |
$(DRM_TOPDIR)/drm_modes.c \ |
$(DRM_TOPDIR)/drm_crtc.c \ |
$(DRM_TOPDIR)/drm_crtc_helper.c \ |
$(DRM_TOPDIR)/i2c/i2c-core.c \ |
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \ |
$(DRM_TOPDIR)/idr.c \ |
radeon_device.c \ |
radeon_clocks.c \ |
radeon_i2c.c \ |
atom.c \ |
radeon_atombios.c \ |
atombios_crtc.c \ |
radeon_encoders.c \ |
radeon_connectors.c \ |
radeon_bios.c \ |
radeon_combios.c \ |
radeon_legacy_crtc.c \ |
radeon_legacy_encoders.c \ |
radeon_display.c \ |
radeon_object.c \ |
radeon_gart.c \ |
radeon_ring.c \ |
r100.c \ |
r300.c \ |
rv515.c \ |
r520.c |
SRC_DEP:= |
NAME_OBJS = $(patsubst %.s, %.obj, $(patsubst %.asm, %.obj,\ |
$(patsubst %.c, %.obj, $(NAME_SRC)))) |
all: $(NAME).dll |
$(NAME).dll: $(NAME_OBJS) $(SRC_DEP) $(HFILES) atikms.lds Makefile |
ld -L$(LIBPATH) $(LDFLAGS) -T atikms.lds -o $@ $(NAME_OBJS) vsprintf.obj icompute.obj $(LIBS) |
%.obj : %.c $(HFILES) Makefile |
$(CC) $(CFLAGS) $(DEFINES) $(INCLUDES) -o $@ -c $< |
/drivers/video/drm/radeon/r100.c |
---|
26,8 → 26,8 |
* Jerome Glisse |
*/ |
//#include <linux/seq_file.h> |
//#include "drmP.h" |
//#include "drm.h" |
#include "drmP.h" |
#include "drm.h" |
#include "radeon_drm.h" |
#include "radeon_microcode.h" |
#include "radeon_reg.h" |
/drivers/video/drm/radeon/r300.c |
---|
26,8 → 26,8 |
* Jerome Glisse |
*/ |
//#include <linux/seq_file.h> |
//#include "drmP.h" |
//#include "drm.h" |
#include "drmP.h" |
#include "drm.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
/drivers/video/drm/radeon/r520.c |
---|
25,7 → 25,7 |
* Alex Deucher |
* Jerome Glisse |
*/ |
//#include "drmP.h" |
#include "drmP.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
501,4 → 501,4 |
//domodedovo 9-00 16/07/2009 |
/drivers/video/drm/radeon/radeon_asic.h |
---|
403,8 → 403,8 |
.gpu_reset = &rv515_gpu_reset, |
.mc_init = &r520_mc_init, |
.mc_fini = &r520_mc_fini, |
.wb_init = &r100_wb_init, |
.wb_fini = &r100_wb_fini, |
// .wb_init = &r100_wb_init, |
// .wb_fini = &r100_wb_fini, |
.gart_enable = &r300_gart_enable, |
.gart_disable = &rv370_pcie_gart_disable, |
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
/drivers/video/drm/radeon/radeon_atombios.c |
---|
447,7 → 447,7 |
struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; |
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); |
ENTRY(); |
supported_devices = |
(union atom_supported_devices *)(ctx->bios + data_offset); |
596,7 → 596,7 |
} |
radeon_link_encoder_connector(dev); |
LEAVE(); |
return true; |
} |
/drivers/video/drm/radeon/radeon_device.c |
---|
37,11 → 37,9 |
#include <syscall.h> |
int radeon_modeset = -1; |
int radeon_dynclks = -1; |
int radeon_r4xx_atom = 0; |
int radeon_agpmode = 0; |
int radeon_vram_limit = 0; |
int radeon_agpmode = -1; |
int radeon_gart_size = 512; /* default gart size */ |
int radeon_benchmarking = 0; |
int radeon_connector_table = 0; |
517,7 → 515,6 |
if (r) { |
return r; |
} |
// r = radeon_init(rdev); |
r = rdev->asic->init(rdev); |
639,14 → 636,15 |
if (!r) { |
r = radeon_cp_init(rdev, 1024 * 1024); |
} |
if (!r) { |
r = radeon_wb_init(rdev); |
if (r) { |
DRM_ERROR("radeon: failled initializing WB (%d).\n", r); |
return r; |
} |
} |
// if (!r) { |
// r = radeon_wb_init(rdev); |
// if (r) { |
// DRM_ERROR("radeon: failled initializing WB (%d).\n", r); |
// return r; |
// } |
// } |
#if 0 |
if (!r) { |
r = radeon_ib_pool_init(rdev); |
if (r) { |
654,8 → 652,6 |
return r; |
} |
} |
#if 0 |
if (!r) { |
r = radeon_ib_test(rdev); |
if (r) { |
663,14 → 659,16 |
return r; |
} |
} |
#endif |
ret = r; |
r = radeon_modeset_init(rdev); |
if (r) { |
return r; |
} |
if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) { |
rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private; |
} |
// if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) { |
// rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private; |
// } |
if (!ret) { |
DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); |
} |
678,9 → 676,7 |
// radeon_benchmark(rdev); |
// } |
#endif |
return ret; |
return -1; |
} |
static struct pci_device_id pciidlist[] = { |
/drivers/video/drm/radeon/radeon_display.c |
---|
179,6 → 179,8 |
struct radeon_crtc *radeon_crtc; |
int i; |
ENTRY(); |
radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); |
if (radeon_crtc == NULL) |
return; |
202,6 → 204,8 |
radeon_atombios_init_crtc(dev, radeon_crtc); |
else |
radeon_legacy_init_crtc(dev, radeon_crtc); |
LEAVE(); |
} |
static const char *encoder_names[34] = { |
318,6 → 322,8 |
struct drm_connector *drm_connector; |
bool ret = false; |
ENTRY(); |
if (rdev->bios) { |
if (rdev->is_atom_bios) { |
if (rdev->family >= CHIP_R600) |
335,6 → 341,7 |
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) |
radeon_ddc_dump(drm_connector); |
} |
LEAVE(); |
return ret; |
} |
584,6 → 591,8 |
.create_handle = radeon_user_framebuffer_create_handle, |
}; |
#endif |
struct drm_framebuffer * |
radeon_framebuffer_create(struct drm_device *dev, |
struct drm_mode_fb_cmd *mode_cmd, |
595,8 → 604,8 |
if (radeon_fb == NULL) { |
return NULL; |
} |
drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs); |
drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd); |
// drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs); |
// drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd); |
radeon_fb->obj = obj; |
return &radeon_fb->base; |
} |
608,20 → 617,25 |
{ |
struct drm_gem_object *obj; |
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); |
return NULL; |
return radeon_framebuffer_create(dev, mode_cmd, obj); |
// obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); |
// |
// return radeon_framebuffer_create(dev, mode_cmd, obj); |
} |
static const struct drm_mode_config_funcs radeon_mode_funcs = { |
.fb_create = radeon_user_framebuffer_create, |
.fb_changed = radeonfb_probe, |
// .fb_create = radeon_user_framebuffer_create, |
// .fb_changed = radeonfb_probe, |
}; |
#endif |
int radeon_modeset_init(struct radeon_device *rdev) |
{ |
dbgprintf("%s\n",__FUNCTION__); |
int num_crtc = 2, i; |
int ret; |
628,7 → 642,7 |
drm_mode_config_init(rdev->ddev); |
rdev->mode_info.mode_config_initialized = true; |
// rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; |
rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; |
if (ASIC_IS_AVIVO(rdev)) { |
rdev->ddev->mode_config.max_width = 8192; |
651,6 → 665,9 |
return ret; |
} |
drm_helper_initial_config(rdev->ddev); |
dbgprintf("done %s\n",__FUNCTION__); |
return 0; |
} |
/drivers/video/drm/radeon/radeon_encoders.c |
---|
0,0 → 1,1714 |
/* |
* Copyright 2007-8 Advanced Micro Devices, Inc. |
* Copyright 2008 Red Hat Inc. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
* |
* Authors: Dave Airlie |
* Alex Deucher |
*/ |
#include "drmP.h" |
//#include <types.h> |
//#include <list.h> |
//#include <syscall.h> |
#include "drm_crtc.h" |
#include "drm_crtc_helper.h" |
#include "radeon_drm.h" |
#include "radeon.h" |
#include "atom.h" |
extern int atom_debug; |
uint32_t |
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) |
{ |
struct radeon_device *rdev = dev->dev_private; |
uint32_t ret = 0; |
switch (supported_device) { |
case ATOM_DEVICE_CRT1_SUPPORT: |
case ATOM_DEVICE_TV1_SUPPORT: |
case ATOM_DEVICE_TV2_SUPPORT: |
case ATOM_DEVICE_CRT2_SUPPORT: |
case ATOM_DEVICE_CV_SUPPORT: |
switch (dac) { |
case 1: /* dac a */ |
if ((rdev->family == CHIP_RS300) || |
(rdev->family == CHIP_RS400) || |
(rdev->family == CHIP_RS480)) |
ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; |
else if (ASIC_IS_AVIVO(rdev)) |
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1; |
else |
ret = ENCODER_OBJECT_ID_INTERNAL_DAC1; |
break; |
case 2: /* dac b */ |
if (ASIC_IS_AVIVO(rdev)) |
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2; |
else { |
/*if (rdev->family == CHIP_R200) |
ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; |
else*/ |
ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; |
} |
break; |
case 3: /* external dac */ |
if (ASIC_IS_AVIVO(rdev)) |
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; |
else |
ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; |
break; |
} |
break; |
case ATOM_DEVICE_LCD1_SUPPORT: |
if (ASIC_IS_AVIVO(rdev)) |
ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; |
else |
ret = ENCODER_OBJECT_ID_INTERNAL_LVDS; |
break; |
case ATOM_DEVICE_DFP1_SUPPORT: |
if ((rdev->family == CHIP_RS300) || |
(rdev->family == CHIP_RS400) || |
(rdev->family == CHIP_RS480)) |
ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; |
else if (ASIC_IS_AVIVO(rdev)) |
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1; |
else |
ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1; |
break; |
case ATOM_DEVICE_LCD2_SUPPORT: |
case ATOM_DEVICE_DFP2_SUPPORT: |
if ((rdev->family == CHIP_RS600) || |
(rdev->family == CHIP_RS690) || |
(rdev->family == CHIP_RS740)) |
ret = ENCODER_OBJECT_ID_INTERNAL_DDI; |
else if (ASIC_IS_AVIVO(rdev)) |
ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; |
else |
ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; |
break; |
case ATOM_DEVICE_DFP3_SUPPORT: |
ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; |
break; |
} |
return ret; |
} |
void |
radeon_link_encoder_connector(struct drm_device *dev) |
{ |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector; |
struct drm_encoder *encoder; |
struct radeon_encoder *radeon_encoder; |
/* walk the list and link encoders to connectors */ |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
radeon_connector = to_radeon_connector(connector); |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
radeon_encoder = to_radeon_encoder(encoder); |
if (radeon_encoder->devices & radeon_connector->devices) |
drm_mode_connector_attach_encoder(connector, encoder); |
} |
} |
} |
static struct drm_connector * |
radeon_get_connector_for_encoder(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector; |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
radeon_connector = to_radeon_connector(connector); |
if (radeon_encoder->devices & radeon_connector->devices) |
return connector; |
} |
return NULL; |
} |
/* used for both atom and legacy */ |
void radeon_rmx_mode_fixup(struct drm_encoder *encoder, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; |
if (mode->hdisplay < native_mode->panel_xres || |
mode->vdisplay < native_mode->panel_yres) { |
radeon_encoder->flags |= RADEON_USE_RMX; |
if (ASIC_IS_AVIVO(rdev)) { |
adjusted_mode->hdisplay = native_mode->panel_xres; |
adjusted_mode->vdisplay = native_mode->panel_yres; |
adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank; |
adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus; |
adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width; |
adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank; |
adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus; |
adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width; |
/* update crtc values */ |
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); |
/* adjust crtc values */ |
adjusted_mode->crtc_hdisplay = native_mode->panel_xres; |
adjusted_mode->crtc_vdisplay = native_mode->panel_yres; |
adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank; |
adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus; |
adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width; |
adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank; |
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus; |
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width; |
} else { |
adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank; |
adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus; |
adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width; |
adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank; |
adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus; |
adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width; |
/* update crtc values */ |
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); |
/* adjust crtc values */ |
adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank; |
adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus; |
adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width; |
adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank; |
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus; |
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width; |
} |
adjusted_mode->flags = native_mode->flags; |
adjusted_mode->clock = native_mode->dotclock; |
} |
} |
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
radeon_encoder->flags &= ~RADEON_USE_RMX; |
drm_mode_set_crtcinfo(adjusted_mode, 0); |
if (radeon_encoder->rmx_type != RMX_OFF) |
radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); |
/* hw bug */ |
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) |
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) |
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; |
return true; |
} |
static void |
atombios_dac_setup(struct drm_encoder *encoder, int action) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
DAC_ENCODER_CONTROL_PS_ALLOCATION args; |
int index = 0, num = 0; |
/* fixme - fill in enc_priv for atom dac */ |
enum radeon_tv_std tv_std = TV_STD_NTSC; |
memset(&args, 0, sizeof(args)); |
switch (radeon_encoder->encoder_id) { |
case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); |
num = 1; |
break; |
case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); |
num = 2; |
break; |
} |
args.ucAction = action; |
if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) |
args.ucDacStandard = ATOM_DAC1_PS2; |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
args.ucDacStandard = ATOM_DAC1_CV; |
else { |
switch (tv_std) { |
case TV_STD_PAL: |
case TV_STD_PAL_M: |
case TV_STD_SCART_PAL: |
case TV_STD_SECAM: |
case TV_STD_PAL_CN: |
args.ucDacStandard = ATOM_DAC1_PAL; |
break; |
case TV_STD_NTSC: |
case TV_STD_NTSC_J: |
case TV_STD_PAL_60: |
default: |
args.ucDacStandard = ATOM_DAC1_NTSC; |
break; |
} |
} |
args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
static void |
atombios_tv_setup(struct drm_encoder *encoder, int action) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
TV_ENCODER_CONTROL_PS_ALLOCATION args; |
int index = 0; |
/* fixme - fill in enc_priv for atom dac */ |
enum radeon_tv_std tv_std = TV_STD_NTSC; |
memset(&args, 0, sizeof(args)); |
index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl); |
args.sTVEncoder.ucAction = action; |
if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
args.sTVEncoder.ucTvStandard = ATOM_TV_CV; |
else { |
switch (tv_std) { |
case TV_STD_NTSC: |
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; |
break; |
case TV_STD_PAL: |
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; |
break; |
case TV_STD_PAL_M: |
args.sTVEncoder.ucTvStandard = ATOM_TV_PALM; |
break; |
case TV_STD_PAL_60: |
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60; |
break; |
case TV_STD_NTSC_J: |
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ; |
break; |
case TV_STD_SCART_PAL: |
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */ |
break; |
case TV_STD_SECAM: |
args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM; |
break; |
case TV_STD_PAL_CN: |
args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN; |
break; |
default: |
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; |
break; |
} |
} |
args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
void |
atombios_external_tmds_setup(struct drm_encoder *encoder, int action) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args; |
int index = 0; |
memset(&args, 0, sizeof(args)); |
index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); |
args.sXTmdsEncoder.ucEnable = action; |
if (radeon_encoder->pixel_clock > 165000) |
args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL; |
/*if (pScrn->rgbBits == 8)*/ |
args.sXTmdsEncoder.ucMisc |= (1 << 1); |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
static void |
atombios_ddia_setup(struct drm_encoder *encoder, int action) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
DVO_ENCODER_CONTROL_PS_ALLOCATION args; |
int index = 0; |
memset(&args, 0, sizeof(args)); |
index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); |
args.sDVOEncoder.ucAction = action; |
args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
if (radeon_encoder->pixel_clock > 165000) |
args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL; |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
/* Overlay of the v1 and v2 LVDS/TMDS encoder-control parameter layouts;
 * atombios_digital_setup() fills the member matching the command table
 * revision reported by atom_parse_cmd_header(). */
union lvds_encoder_control {
	LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};
/*
 * atombios_digital_setup - program an internal LVDS/TMDS digital encoder
 * @encoder: encoder to program
 * @action: AtomBIOS action code stored in ucAction
 *
 * Picks the LVDS/TMDS1/TMDS2 encoder-control command table from the
 * encoder object id, then fills either the v1 or v2 parameter layout
 * according to the table revision reported by atom_parse_cmd_header(),
 * and executes the table.  Returns silently if the encoder has no
 * connector or if the encoder/connector private data is missing.
 */
static void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	union lvds_encoder_control args;
	int index = 0;
	uint8_t frev, crev;
	struct radeon_encoder_atom_dig *dig;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;

	connector = radeon_get_connector_for_encoder(encoder);
	if (!connector)
		return;
	radeon_connector = to_radeon_connector(connector);
	if (!radeon_encoder->enc_priv)
		return;
	dig = radeon_encoder->enc_priv;
	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;

	memset(&args, 0, sizeof(args));

	/* select the command table matching the encoder block */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		/* LVTM1 drives either the panel (LVDS) or a second TMDS link */
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
		break;
	}

	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);

	switch (frev) {
	case 1:
	case 2:
		switch (crev) {
		case 1:
			args.v1.ucMisc = 0;
			args.v1.ucAction = action;
			/* NOTE(review): edid_blob_ptr is cast straight to
			 * struct edid * — looks like it should be the raw EDID
			 * (e.g. radeon_connector->edid); confirm against the
			 * drm_connector definition in this tree */
			if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
				args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
			/* pixel clock in 10 kHz units */
			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
				/* lvds_misc bit 0 presumably = dual channel,
				 * bit 1 = 24bpp panel (mirrors ATOM panel misc
				 * bits) — TODO confirm against atombios.h */
				if (dig->lvds_misc & (1 << 0))
					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
				if (dig->lvds_misc & (1 << 1))
					args.v1.ucMisc |= (1 << 1);
			} else {
				if (dig_connector->linkb)
					args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
				/* single TMDS link maxes out at 165 MHz */
				if (radeon_encoder->pixel_clock > 165000)
					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
				/*if (pScrn->rgbBits == 8) */
				args.v1.ucMisc |= (1 << 1);
			}
			break;
		case 2:
		case 3:
			args.v2.ucMisc = 0;
			args.v2.ucAction = action;
			/* coherent-mode flag only exists in the crev 3 layout */
			if (crev == 3) {
				if (dig->coherent_mode)
					args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
			}
			/* NOTE(review): same suspect edid_blob_ptr cast as above */
			if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
				args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
			args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v2.ucTruncate = 0;
			args.v2.ucSpatial = 0;
			args.v2.ucTemporal = 0;
			args.v2.ucFRC = 0;
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
				if (dig->lvds_misc & (1 << 0))
					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
				/* bit 5: spatial dithering requested by the panel */
				if (dig->lvds_misc & (1 << 5)) {
					args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
					if (dig->lvds_misc & (1 << 1))
						args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
				}
				/* bit 6: temporal dithering requested by the panel */
				if (dig->lvds_misc & (1 << 6)) {
					args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
					if (dig->lvds_misc & (1 << 1))
						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
					/* grey level 2 selects 4-level temporal dithering */
					if (((dig->lvds_misc >> 2) & 0x3) == 2)
						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
				}
			} else {
				if (dig_connector->linkb)
					args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
				if (radeon_encoder->pixel_clock > 165000)
					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			break;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		break;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
/*
 * atombios_get_encoder_mode - derive the ATOM_ENCODER_MODE_* for an encoder
 * @encoder: encoder whose active connector determines the mode
 *
 * Returns the AtomBIOS encoder mode (HDMI/DVI/CRT/LVDS/TV) based on the
 * connector type and, where relevant, HDMI detection from the EDID.
 * Returns 0 if the encoder has no connector.  Every connector type is
 * covered (the default label groups with DVI-D/HDMI), so control never
 * falls off the end of the switch.
 */
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;

	connector = radeon_get_connector_for_encoder(encoder);
	if (!connector)
		return 0;
	radeon_connector = to_radeon_connector(connector);

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DVII:
		/* NOTE(review): edid_blob_ptr is cast straight to
		 * struct edid * here and below — looks like it should be the
		 * raw EDID buffer; confirm against this tree's drm_connector */
		if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
			return ATOM_ENCODER_MODE_HDMI;
		else if (radeon_connector->use_digital)
			return ATOM_ENCODER_MODE_DVI;
		else
			return ATOM_ENCODER_MODE_CRT;
		break;
	case DRM_MODE_CONNECTOR_DVID:
	case DRM_MODE_CONNECTOR_HDMIA:
	case DRM_MODE_CONNECTOR_HDMIB:
	default:
		if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
			return ATOM_ENCODER_MODE_HDMI;
		else
			return ATOM_ENCODER_MODE_DVI;
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		return ATOM_ENCODER_MODE_LVDS;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		/* DP output mode not implemented yet; fall back to HDMI/DVI */
		/*if (radeon_output->MonType == MT_DP)
		return ATOM_ENCODER_MODE_DP;
		else*/
		if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
			return ATOM_ENCODER_MODE_HDMI;
		else
			return ATOM_ENCODER_MODE_DVI;
		break;
	case CONNECTOR_DVI_A:
	case CONNECTOR_VGA:
		return ATOM_ENCODER_MODE_CRT;
		break;
	case CONNECTOR_STV:
	case CONNECTOR_CTV:
	case CONNECTOR_DIN:
		/* fix me */
		return ATOM_ENCODER_MODE_TV;
		/*return ATOM_ENCODER_MODE_CV;*/
		break;
	}
}
/*
 * atombios_dig_encoder_setup - program a DIG digital encoder block
 * @encoder: encoder to program
 * @action: AtomBIOS action code stored in ucAction
 *
 * Selects DIG1/DIG2EncoderControl (by dig block on DCE3.2, by encoder
 * object id otherwise), fills in transmitter selection, link A/B choice,
 * lane count and encoder mode, then executes the table.  Returns silently
 * if the connector or the encoder/connector private data is missing.
 */
static void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	DIG_ENCODER_CONTROL_PS_ALLOCATION args;
	int index = 0, num = 0;	/* num is informational only; never read back */
	uint8_t frev, crev;
	struct radeon_encoder_atom_dig *dig;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;

	connector = radeon_get_connector_for_encoder(encoder);
	if (!connector)
		return;
	radeon_connector = to_radeon_connector(connector);
	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;
	if (!radeon_encoder->enc_priv)
		return;
	dig = radeon_encoder->enc_priv;

	memset(&args, 0, sizeof(args));

	if (ASIC_IS_DCE32(rdev)) {
		/* DCE3.2: the dig block, not the encoder id, picks the table */
		if (dig->dig_block)
			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
		num = dig->dig_block + 1;
	} else {
		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
			num = 1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
			num = 2;
			break;
		}
	}

	/* frev/crev fetched but not consulted here — single layout assumed */
	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);

	args.ucAction = action;
	/* pixel clock in 10 kHz units */
	args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);

	if (ASIC_IS_DCE32(rdev)) {
		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
			break;
		}
	} else {
		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
			args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
			break;
		}
	}

	if (radeon_encoder->pixel_clock > 165000) {
		/* dual link: both links, all 8 lanes */
		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
		args.ucLaneNum = 8;
	} else {
		if (dig_connector->linkb)
			args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
		else
			args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
		args.ucLaneNum = 4;
	}

	args.ucEncoderMode = atombios_get_encoder_mode(encoder);

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
/* Overlay of the v1 and v2 DIG transmitter-control parameter layouts;
 * atombios_dig_transmitter_setup() uses v2 on DCE3.2 and v1 otherwise. */
union dig_transmitter_control {
	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
};
/*
 * atombios_dig_transmitter_setup - program a UNIPHY/LVTMA transmitter
 * @encoder: encoder whose transmitter is programmed
 * @action: ATOM_TRANSMITTER_ACTION_* code stored in ucAction
 *
 * Selects the transmitter control table (UNIPHY on DCE3.2, DIG1/DIG2
 * otherwise), then fills the v2 layout (DCE3.2) or v1 layout (earlier
 * parts) with clock, link, lane and coherency configuration and executes
 * it.  Returns silently if the connector or private data is missing.
 */
static void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	union dig_transmitter_control args;
	int index = 0, num = 0;
	uint8_t frev, crev;
	struct radeon_encoder_atom_dig *dig;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;

	connector = radeon_get_connector_for_encoder(encoder);
	if (!connector)
		return;
	radeon_connector = to_radeon_connector(connector);
	if (!radeon_encoder->enc_priv)
		return;
	dig = radeon_encoder->enc_priv;
	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;

	memset(&args, 0, sizeof(args));

	if (ASIC_IS_DCE32(rdev))
		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
	else {
		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
			break;
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
			index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
			break;
		}
	}

	/* frev/crev fetched but not consulted; layout chosen by ASIC family */
	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);

	/* ucAction is at the same offset in both layouts */
	args.v1.ucAction = action;

	if (ASIC_IS_DCE32(rdev)) {
		/* per-lane clock: presumably pixel_clock (kHz) scaled to the
		 * table's units, /5 for dual link, /2.5 for single —
		 * TODO confirm the unit convention */
		if (radeon_encoder->pixel_clock > 165000) {
			args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 2) / 100);
			args.v2.acConfig.fDualLinkConnector = 1;
		} else {
			args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 4) / 100);
		}
		if (dig->dig_block)
			args.v2.acConfig.ucEncoderSel = 1;

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.v2.acConfig.ucTransmitterSel = 0;
			num = 0;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			args.v2.acConfig.ucTransmitterSel = 1;
			num = 1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			args.v2.acConfig.ucTransmitterSel = 2;
			num = 2;
			break;
		}

		if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v2.acConfig.fCoherentMode = 1;
		}
	} else {
		args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
		/* pixel clock in 10 kHz units */
		args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock) / 10);

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
			if (rdev->flags & RADEON_IS_IGP) {
				/* IGP: lane routing comes from the BIOS-provided
				 * igp_lane_info bitmask */
				if (radeon_encoder->pixel_clock > 165000) {
					args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
							     ATOM_TRANSMITTER_CONFIG_LINKA_B);
					if (dig_connector->igp_lane_info & 0x3)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
					else if (dig_connector->igp_lane_info & 0xc)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
				} else {
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
					if (dig_connector->igp_lane_info & 0x1)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
					else if (dig_connector->igp_lane_info & 0x2)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
					else if (dig_connector->igp_lane_info & 0x4)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
					else if (dig_connector->igp_lane_info & 0x8)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
				}
			} else {
				if (radeon_encoder->pixel_clock > 165000)
					args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
							     ATOM_TRANSMITTER_CONFIG_LINKA_B |
							     ATOM_TRANSMITTER_CONFIG_LANE_0_7);
				else {
					if (dig_connector->linkb)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
					else
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
				}
			}
			break;
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
			if (radeon_encoder->pixel_clock > 165000)
				args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
						     ATOM_TRANSMITTER_CONFIG_LINKA_B |
						     ATOM_TRANSMITTER_CONFIG_LANE_0_7);
			else {
				if (dig_connector->linkb)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
				else
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
			}
			break;
		}

		if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
		}
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
/*
 * atom_rv515_force_tv_scaler - load fixed TV scaler coefficients on RV515-RV570
 * @rdev: device whose scaler registers are written
 *
 * Magic register sequence: a few scaler control registers are initialised,
 * then a long series of (index, data) pairs is written through the
 * 0x6578/0x657C index/data register pair.  The values are an opaque
 * hardware coefficient table — presumably filter taps for the TV scaler;
 * do not reorder or edit individual entries.
 */
static void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
{
	/* scaler setup registers */
	WREG32(0x659C, 0x0);
	WREG32(0x6594, 0x705);
	WREG32(0x65A4, 0x10001);
	WREG32(0x65D8, 0x0);
	WREG32(0x65B0, 0x0);
	WREG32(0x65C0, 0x0);
	WREG32(0x65D4, 0x0);
	/* coefficient table: WREG32(0x6578, index); WREG32(0x657C, data); */
	WREG32(0x6578, 0x0);
	WREG32(0x657C, 0x841880A8);
	WREG32(0x6578, 0x1);
	WREG32(0x657C, 0x84208680);
	WREG32(0x6578, 0x2);
	WREG32(0x657C, 0xBFF880B0);
	WREG32(0x6578, 0x100);
	WREG32(0x657C, 0x83D88088);
	WREG32(0x6578, 0x101);
	WREG32(0x657C, 0x84608680);
	WREG32(0x6578, 0x102);
	WREG32(0x657C, 0xBFF080D0);
	WREG32(0x6578, 0x200);
	WREG32(0x657C, 0x83988068);
	WREG32(0x6578, 0x201);
	WREG32(0x657C, 0x84A08680);
	WREG32(0x6578, 0x202);
	WREG32(0x657C, 0xBFF080F8);
	WREG32(0x6578, 0x300);
	WREG32(0x657C, 0x83588058);
	WREG32(0x6578, 0x301);
	WREG32(0x657C, 0x84E08660);
	WREG32(0x6578, 0x302);
	WREG32(0x657C, 0xBFF88120);
	WREG32(0x6578, 0x400);
	WREG32(0x657C, 0x83188040);
	WREG32(0x6578, 0x401);
	WREG32(0x657C, 0x85008660);
	WREG32(0x6578, 0x402);
	WREG32(0x657C, 0xBFF88150);
	WREG32(0x6578, 0x500);
	WREG32(0x657C, 0x82D88030);
	WREG32(0x6578, 0x501);
	WREG32(0x657C, 0x85408640);
	WREG32(0x6578, 0x502);
	WREG32(0x657C, 0xBFF88180);
	WREG32(0x6578, 0x600);
	WREG32(0x657C, 0x82A08018);
	WREG32(0x6578, 0x601);
	WREG32(0x657C, 0x85808620);
	WREG32(0x6578, 0x602);
	WREG32(0x657C, 0xBFF081B8);
	WREG32(0x6578, 0x700);
	WREG32(0x657C, 0x82608010);
	WREG32(0x6578, 0x701);
	WREG32(0x657C, 0x85A08600);
	WREG32(0x6578, 0x702);
	WREG32(0x657C, 0x800081F0);
	WREG32(0x6578, 0x800);
	WREG32(0x657C, 0x8228BFF8);
	WREG32(0x6578, 0x801);
	WREG32(0x657C, 0x85E085E0);
	WREG32(0x6578, 0x802);
	WREG32(0x657C, 0xBFF88228);
	WREG32(0x6578, 0x10000);
	WREG32(0x657C, 0x82A8BF00);
	WREG32(0x6578, 0x10001);
	WREG32(0x657C, 0x82A08CC0);
	WREG32(0x6578, 0x10002);
	WREG32(0x657C, 0x8008BEF8);
	WREG32(0x6578, 0x10100);
	WREG32(0x657C, 0x81F0BF28);
	WREG32(0x6578, 0x10101);
	WREG32(0x657C, 0x83608CA0);
	WREG32(0x6578, 0x10102);
	WREG32(0x657C, 0x8018BED0);
	WREG32(0x6578, 0x10200);
	WREG32(0x657C, 0x8148BF38);
	WREG32(0x6578, 0x10201);
	WREG32(0x657C, 0x84408C80);
	WREG32(0x6578, 0x10202);
	WREG32(0x657C, 0x8008BEB8);
	WREG32(0x6578, 0x10300);
	WREG32(0x657C, 0x80B0BF78);
	WREG32(0x6578, 0x10301);
	WREG32(0x657C, 0x85008C20);
	WREG32(0x6578, 0x10302);
	WREG32(0x657C, 0x8020BEA0);
	WREG32(0x6578, 0x10400);
	WREG32(0x657C, 0x8028BF90);
	WREG32(0x6578, 0x10401);
	WREG32(0x657C, 0x85E08BC0);
	WREG32(0x6578, 0x10402);
	WREG32(0x657C, 0x8018BE90);
	WREG32(0x6578, 0x10500);
	WREG32(0x657C, 0xBFB8BFB0);
	WREG32(0x6578, 0x10501);
	WREG32(0x657C, 0x86C08B40);
	WREG32(0x6578, 0x10502);
	WREG32(0x657C, 0x8010BE90);
	WREG32(0x6578, 0x10600);
	WREG32(0x657C, 0xBF58BFC8);
	WREG32(0x6578, 0x10601);
	WREG32(0x657C, 0x87A08AA0);
	WREG32(0x6578, 0x10602);
	WREG32(0x657C, 0x8010BE98);
	WREG32(0x6578, 0x10700);
	WREG32(0x657C, 0xBF10BFF0);
	WREG32(0x6578, 0x10701);
	WREG32(0x657C, 0x886089E0);
	WREG32(0x6578, 0x10702);
	WREG32(0x657C, 0x8018BEB0);
	WREG32(0x6578, 0x10800);
	WREG32(0x657C, 0xBED8BFE8);
	WREG32(0x6578, 0x10801);
	WREG32(0x657C, 0x89408940);
	WREG32(0x6578, 0x10802);
	WREG32(0x657C, 0xBFE8BED8);
	WREG32(0x6578, 0x20000);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20001);
	WREG32(0x657C, 0x90008000);
	WREG32(0x6578, 0x20002);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20003);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20100);
	WREG32(0x657C, 0x80108000);
	WREG32(0x6578, 0x20101);
	WREG32(0x657C, 0x8FE0BF70);
	WREG32(0x6578, 0x20102);
	WREG32(0x657C, 0xBFE880C0);
	WREG32(0x6578, 0x20103);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20200);
	WREG32(0x657C, 0x8018BFF8);
	WREG32(0x6578, 0x20201);
	WREG32(0x657C, 0x8F80BF08);
	WREG32(0x6578, 0x20202);
	WREG32(0x657C, 0xBFD081A0);
	WREG32(0x6578, 0x20203);
	WREG32(0x657C, 0xBFF88000);
	WREG32(0x6578, 0x20300);
	WREG32(0x657C, 0x80188000);
	WREG32(0x6578, 0x20301);
	WREG32(0x657C, 0x8EE0BEC0);
	WREG32(0x6578, 0x20302);
	WREG32(0x657C, 0xBFB082A0);
	WREG32(0x6578, 0x20303);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20400);
	WREG32(0x657C, 0x80188000);
	WREG32(0x6578, 0x20401);
	WREG32(0x657C, 0x8E00BEA0);
	WREG32(0x6578, 0x20402);
	WREG32(0x657C, 0xBF8883C0);
	WREG32(0x6578, 0x20403);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20500);
	WREG32(0x657C, 0x80188000);
	WREG32(0x6578, 0x20501);
	WREG32(0x657C, 0x8D00BE90);
	WREG32(0x6578, 0x20502);
	WREG32(0x657C, 0xBF588500);
	WREG32(0x6578, 0x20503);
	WREG32(0x657C, 0x80008008);
	WREG32(0x6578, 0x20600);
	WREG32(0x657C, 0x80188000);
	WREG32(0x6578, 0x20601);
	WREG32(0x657C, 0x8BC0BE98);
	WREG32(0x6578, 0x20602);
	WREG32(0x657C, 0xBF308660);
	WREG32(0x6578, 0x20603);
	WREG32(0x657C, 0x80008008);
	WREG32(0x6578, 0x20700);
	WREG32(0x657C, 0x80108000);
	WREG32(0x6578, 0x20701);
	WREG32(0x657C, 0x8A80BEB0);
	WREG32(0x6578, 0x20702);
	WREG32(0x657C, 0xBF0087C0);
	WREG32(0x6578, 0x20703);
	WREG32(0x657C, 0x80008008);
	WREG32(0x6578, 0x20800);
	WREG32(0x657C, 0x80108000);
	WREG32(0x6578, 0x20801);
	WREG32(0x657C, 0x8920BED0);
	WREG32(0x6578, 0x20802);
	WREG32(0x657C, 0xBED08920);
	WREG32(0x6578, 0x20803);
	WREG32(0x657C, 0x80008010);
	WREG32(0x6578, 0x30000);
	WREG32(0x657C, 0x90008000);
	WREG32(0x6578, 0x30001);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x30100);
	WREG32(0x657C, 0x8FE0BF90);
	WREG32(0x6578, 0x30101);
	WREG32(0x657C, 0xBFF880A0);
	WREG32(0x6578, 0x30200);
	WREG32(0x657C, 0x8F60BF40);
	WREG32(0x6578, 0x30201);
	WREG32(0x657C, 0xBFE88180);
	WREG32(0x6578, 0x30300);
	WREG32(0x657C, 0x8EC0BF00);
	WREG32(0x6578, 0x30301);
	WREG32(0x657C, 0xBFC88280);
	WREG32(0x6578, 0x30400);
	WREG32(0x657C, 0x8DE0BEE0);
	WREG32(0x6578, 0x30401);
	WREG32(0x657C, 0xBFA083A0);
	WREG32(0x6578, 0x30500);
	WREG32(0x657C, 0x8CE0BED0);
	WREG32(0x6578, 0x30501);
	WREG32(0x657C, 0xBF7884E0);
	WREG32(0x6578, 0x30600);
	WREG32(0x657C, 0x8BA0BED8);
	WREG32(0x6578, 0x30601);
	WREG32(0x657C, 0xBF508640);
	WREG32(0x6578, 0x30700);
	WREG32(0x657C, 0x8A60BEE8);
	WREG32(0x6578, 0x30701);
	WREG32(0x657C, 0xBF2087A0);
	WREG32(0x6578, 0x30800);
	WREG32(0x657C, 0x8900BF00);
	WREG32(0x6578, 0x30801);
	WREG32(0x657C, 0xBF008900);
}
static void |
atombios_yuv_setup(struct drm_encoder *encoder, bool enable) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
ENABLE_YUV_PS_ALLOCATION args; |
int index = GetIndexIntoMasterTable(COMMAND, EnableYUV); |
uint32_t temp, reg; |
memset(&args, 0, sizeof(args)); |
if (rdev->family >= CHIP_R600) |
reg = R600_BIOS_3_SCRATCH; |
else |
reg = RADEON_BIOS_3_SCRATCH; |
/* XXX: fix up scratch reg handling */ |
temp = RREG32(reg); |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) |
WREG32(reg, (ATOM_S3_TV1_ACTIVE | |
(radeon_crtc->crtc_id << 18))); |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24))); |
else |
WREG32(reg, 0); |
if (enable) |
args.ucEnable = ATOM_ENABLE; |
args.ucCRTC = radeon_crtc->crtc_id; |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
WREG32(reg, temp); |
} |
static void |
atombios_overscan_setup(struct drm_encoder *encoder, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
SET_CRTC_OVERSCAN_PS_ALLOCATION args; |
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); |
memset(&args, 0, sizeof(args)); |
args.usOverscanRight = 0; |
args.usOverscanLeft = 0; |
args.usOverscanBottom = 0; |
args.usOverscanTop = 0; |
args.ucCRTC = radeon_crtc->crtc_id; |
if (radeon_encoder->flags & RADEON_USE_RMX) { |
if (radeon_encoder->rmx_type == RMX_FULL) { |
args.usOverscanRight = 0; |
args.usOverscanLeft = 0; |
args.usOverscanBottom = 0; |
args.usOverscanTop = 0; |
} else if (radeon_encoder->rmx_type == RMX_CENTER) { |
args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; |
args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; |
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; |
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; |
} else if (radeon_encoder->rmx_type == RMX_ASPECT) { |
int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; |
int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; |
if (a1 > a2) { |
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; |
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; |
} else if (a2 > a1) { |
args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; |
args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; |
} |
} |
} |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
static void |
atombios_scaler_setup(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
ENABLE_SCALER_PS_ALLOCATION args; |
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); |
/* fixme - fill in enc_priv for atom dac */ |
enum radeon_tv_std tv_std = TV_STD_NTSC; |
if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) |
return; |
memset(&args, 0, sizeof(args)); |
args.ucScaler = radeon_crtc->crtc_id; |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { |
switch (tv_std) { |
case TV_STD_NTSC: |
default: |
args.ucTVStandard = ATOM_TV_NTSC; |
break; |
case TV_STD_PAL: |
args.ucTVStandard = ATOM_TV_PAL; |
break; |
case TV_STD_PAL_M: |
args.ucTVStandard = ATOM_TV_PALM; |
break; |
case TV_STD_PAL_60: |
args.ucTVStandard = ATOM_TV_PAL60; |
break; |
case TV_STD_NTSC_J: |
args.ucTVStandard = ATOM_TV_NTSCJ; |
break; |
case TV_STD_SCART_PAL: |
args.ucTVStandard = ATOM_TV_PAL; /* ??? */ |
break; |
case TV_STD_SECAM: |
args.ucTVStandard = ATOM_TV_SECAM; |
break; |
case TV_STD_PAL_CN: |
args.ucTVStandard = ATOM_TV_PALCN; |
break; |
} |
args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; |
} else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) { |
args.ucTVStandard = ATOM_TV_CV; |
args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; |
} else if (radeon_encoder->flags & RADEON_USE_RMX) { |
if (radeon_encoder->rmx_type == RMX_FULL) |
args.ucEnable = ATOM_SCALER_EXPANSION; |
else if (radeon_encoder->rmx_type == RMX_CENTER) |
args.ucEnable = ATOM_SCALER_CENTER; |
else if (radeon_encoder->rmx_type == RMX_ASPECT) |
args.ucEnable = ATOM_SCALER_EXPANSION; |
} else { |
if (ASIC_IS_AVIVO(rdev)) |
args.ucEnable = ATOM_SCALER_DISABLE; |
else |
args.ucEnable = ATOM_SCALER_CENTER; |
} |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) |
&& rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { |
atom_rv515_force_tv_scaler(rdev); |
} |
} |
/*
 * radeon_atom_encoder_dpms - DPMS hook for AtomBIOS encoders
 * @encoder: encoder to power up/down
 * @mode: DRM_MODE_DPMS_* state (only ON enables; standby/suspend/off disable)
 *
 * For DIG (UNIPHY/LVTMA) encoders, power state is driven through the
 * transmitter setup; for all other encoders the matching *OutputControl
 * command table is executed with ATOM_ENABLE/ATOM_DISABLE.  Finally the
 * BIOS scratch registers are updated to reflect the new state.
 */
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
	int index = 0;
	bool is_dig = false;

	memset(&args, 0, sizeof(args));

	/* pick the output-control table; DIG encoders are handled via the
	 * transmitter instead and just set is_dig */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		is_dig = true;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		/* LVTM1 serves either the panel or an external TMDS link */
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		/* DAC1 can drive TV, component (CV) or a CRT */
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
		break;
	}

	if (is_dig) {
		/* DIG encoders: power through the transmitter, not a table */
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
			break;
		}
	} else {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			args.ucAction = ATOM_ENABLE;
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			args.ucAction = ATOM_DISABLE;
			break;
		}
		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
	}
	/* mirror the new power state into the BIOS scratch registers */
	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
/* Overlay of the v1 and v2 SelectCRTC_Source parameter layouts.
 * (The "sourc" typo is historical; kept because other code refers to
 * this tag name.) */
union crtc_sourc_param {
	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
/* Route the CRTC the encoder is currently attached to (encoder->crtc)
 * into the encoder by executing the SelectCRTC_Source atom table.
 * The parameter revision (frev/crev) returned by atom_parse_cmd_header
 * decides whether the v1 (ATOM device-index based) or v2 (encoder-id
 * based) layout of the union is used. */
static void
atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	union crtc_sourc_param args;
	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
	uint8_t frev, crev;

	memset(&args, 0, sizeof(args));

	/* ask the atom interpreter which parameter revision this BIOS uses */
	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
		default:
			if (ASIC_IS_AVIVO(rdev))
				args.v1.ucCRTC = radeon_crtc->crtc_id;
			else {
				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
					args.v1.ucCRTC = radeon_crtc->crtc_id;
				} else {
					/* pre-AVIVO: CRTC id is shifted for every
					 * encoder except the primary DAC */
					args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
				}
			}
			/* v1 identifies the output by ATOM device index,
			 * derived from the encoder id / supported devices */
			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
				args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_LVDS:
			case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
				/* LVTMA can drive either the panel or a DFP */
				if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
					args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
				else
					args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_DVO1:
			case ENCODER_OBJECT_ID_INTERNAL_DDI:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
				args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_DAC1:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
				/* DACs serve TV, component (CV) or CRT in
				 * that priority order */
				if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
				else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
				else
					args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_DAC2:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
				if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
				else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
				else
					args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
				break;
			}
			break;
		case 2:
			/* v2 identifies the output by internal encoder id
			 * plus the encode mode (DVI/HDMI/DP/...) */
			args.v2.ucCRTC = radeon_crtc->crtc_id;
			args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				/* DCE3.2 has two DIG blocks, selected here by
				 * the CRTC id; older parts only have DIG1 */
				if (ASIC_IS_DCE32(rdev)) {
					if (radeon_crtc->crtc_id)
						args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
					else
						args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
				} else
					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
				args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
				/* TV and CV both go through the TV encoder */
				if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else
					args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
				if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else
					args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
				break;
			}
			break;
		}
		break;
	default:
		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
		break;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
/* Apply per-board workarounds after the encoder has been set up.
 * Currently: disable LVTMA truncation/dithering on a specific MacBook
 * (PCI device 0x71C5, subsystem 106b:0080), and re-enable interleave
 * for interlaced modes on AVIVO parts, which the scaler setup can
 * clear (per the comment in the original code). */
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
			      struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);

	/* Funky macbooks */
	if ((dev->pdev->device == 0x71C5) &&
	    (dev->pdev->subsystem_vendor == 0x106b) &&
	    (dev->pdev->subsystem_device == 0x0080)) {
		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
			/* read-modify-write: clear both bit-depth reduction bits */
			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
			WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
		}
	}

	/* set scaler clears this on some chips */
	if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN);
}
/* drm_encoder_helper_funcs.mode_set: program the encoder for the new
 * mode.  Sets up scratch regs, overscan, scaler and CRTC routing, then
 * runs the per-encoder-type enable sequence.  NOTE: the ordering below
 * (route CRTC first, then enable the encoder/transmitter) is part of
 * the hardware programming sequence — do not reorder. */
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);

	/* remember which DIG block this encoder drives */
	if (radeon_encoder->enc_priv) {
		struct radeon_encoder_atom_dig *dig;

		dig = radeon_encoder->enc_priv;
		dig->dig_block = radeon_crtc->crtc_id;
	}
	/* atom tables read the pixel clock from the encoder struct */
	radeon_encoder->pixel_clock = adjusted_mode->clock;

	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
	atombios_overscan_setup(encoder, mode, adjusted_mode);
	atombios_scaler_setup(encoder);
	atombios_set_encoder_crtc_source(encoder);

	/* YUV output only for TV/component devices on AVIVO parts */
	if (ASIC_IS_AVIVO(rdev)) {
		if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
			atombios_yuv_setup(encoder, true);
		else
			atombios_yuv_setup(encoder, false);
	}

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		/* disable the encoder and transmitter */
		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
		atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
		/* setup and enable the encoder and transmitter */
		atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
		atombios_ddia_setup(encoder, ATOM_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		atombios_external_tmds_setup(encoder, ATOM_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		atombios_dac_setup(encoder, ATOM_ENABLE);
		/* TV/component additionally needs the TV encoder enabled */
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
			atombios_tv_setup(encoder, ATOM_ENABLE);
		break;
	}
	atombios_apply_encoder_quirks(encoder, adjusted_mode);
}
/* Trigger the BIOS DAC_LoadDetection table for this encoder.  Returns
 * true if the detection was run (the result must then be read back
 * from the BIOS scratch registers by the caller), false if the encoder
 * drives no TV/CV/CRT device and detection does not apply. */
static bool
atombios_dac_load_detect(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);

	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
				       ATOM_DEVICE_CV_SUPPORT |
				       ATOM_DEVICE_CRT_SUPPORT)) {
		DAC_LOAD_DETECTION_PS_ALLOCATION args;
		int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
		uint8_t frev, crev;

		memset(&args, 0, sizeof(args));

		/* crev is needed below to decide whether ucMisc exists */
		atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);

		args.sDacload.ucMisc = 0;

		/* DAC A for the primary DAC(s), DAC B for everything else */
		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
			args.sDacload.ucDacType = ATOM_DAC_A;
		else
			args.sDacload.ucDacType = ATOM_DAC_B;

		/* pick the first supported device, CRT1 > CRT2 > CV > TV1 */
		if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT)
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
		else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT)
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
		else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
			/* table rev 3+ can request YPrPb load detection */
			if (crev >= 3)
				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
		} else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
			if (crev >= 3)
				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
		}

		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		return true;
	} else
		return false;
}
/* drm_encoder_helper_funcs.detect for DAC outputs: run BIOS load
 * detection, then decode the result from BIOS scratch register 0.
 * Devices are checked in CRT1 > CRT2 > CV > TV1 priority order,
 * mirroring atombios_dac_load_detect above. */
static enum drm_connector_status
radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t bios_0_scratch;

	if (!atombios_dac_load_detect(encoder)) {
		DRM_DEBUG("detect returned false \n");
		return connector_status_unknown;
	}

	/* the scratch register moved on R600+ */
	if (rdev->family >= CHIP_R600)
		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
	else
		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);

	DRM_DEBUG("Bios 0 scratch %x\n", bios_0_scratch);
	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
			return connector_status_connected;
	} else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
			return connector_status_connected;
	} else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
			return connector_status_connected;
	} else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
			return connector_status_connected; /* CTV */
		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
			return connector_status_connected; /* STV */
	}
	return connector_status_disconnected;
}
/* Helper .prepare hook: lock the output and power the encoder down
 * before a mode set (paired with radeon_atom_encoder_commit). */
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
	radeon_atom_output_lock(encoder, true);
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
/* Helper .commit hook: power the encoder back up after the mode set
 * and release the output lock taken in radeon_atom_encoder_prepare. */
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	radeon_atom_output_lock(encoder, false);
}
/* Helper vtable for digital (TMDS/LVDS/DIG) encoders; no .detect
 * because load detection only works for DACs. */
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	/* no detect for TMDS/LVDS yet */
};
/* Helper vtable for analog (DAC/TVDAC) encoders; identical to the DIG
 * table plus load-detect support. */
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	.detect = radeon_atom_dac_detect,
};
/* drm_encoder_funcs.destroy: free the private data, unhook the encoder
 * from the DRM core, then free the containing radeon_encoder.  The
 * order matters: cleanup must run before the embedding struct is freed. */
void radeon_enc_destroy(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);

	kfree(radeon_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(radeon_encoder);
}
/* Core encoder vtable shared by every atom encoder type. */
static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
struct radeon_encoder_atom_dig * |
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) |
{ |
struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); |
if (!dig) |
return NULL; |
/* coherent mode by default */ |
dig->coherent_mode = true; |
return dig; |
} |
/* Register an encoder parsed from the BIOS object tables.  If an
 * encoder with the same id already exists, just OR in the additional
 * supported device bits; otherwise allocate one and hook it up with
 * the vtable matching its type. */
void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		radeon_encoder = to_radeon_encoder(encoder);
		if (radeon_encoder->encoder_id == encoder_id) {
			radeon_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
	if (!radeon_encoder)
		return;

	encoder = &radeon_encoder->base;
	/* every encoder may drive either CRTC, no cloning */
	encoder->possible_crtcs = 0x3;
	encoder->possible_clones = 0;

	radeon_encoder->enc_priv = NULL;
	radeon_encoder->encoder_id = encoder_id;
	radeon_encoder->devices = supported_device;

	/* NOTE(review): an encoder_id matching none of the cases below
	 * leaves radeon_encoder allocated but never registered — looks
	 * like a leak; confirm whether unknown ids can occur here. */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		/* LVTMA-class blocks: LVDS panel (with full RMX scaling)
		 * or plain TMDS, depending on the attached device */
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			radeon_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
		} else {
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		}
		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
		radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
		break;
	}
}
/drivers/video/drm/radeon/radeon_fence.c |
---|
0,0 → 1,387 |
/* |
* Copyright 2009 Jerome Glisse. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
*/ |
/* |
* Authors: |
* Jerome Glisse <glisse@freedesktop.org> |
* Dave Airlie |
*/ |
#include <linux/seq_file.h> |
#include <asm/atomic.h> |
#include <linux/wait.h> |
#include <linux/list.h> |
#include <linux/kref.h> |
#include "drmP.h" |
#include "drm.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
/* Assign the next sequence number to a fence and emit it to the ring
 * (or write it straight to the scratch register when the CP is down).
 * Moves the fence from the 'created' list to 'emited'.  Idempotent:
 * an already-emitted fence returns 0 immediately.  Always returns 0. */
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready) {
		/* FIXME: cp is not running assume everythings is done right
		 * away
		 */
		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
	} else {
		radeon_fence_ring_emit(rdev, fence);
	}
	fence->emited = true;
	/* 2 second deadline for this fence to signal */
	fence->timeout = jiffies + ((2000 * HZ) / 1000);
	list_del(&fence->list);
	list_add_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
/* Poll the fence scratch register and retire completed fences.
 * Must be called with fence_drv.lock held (hence _locked).  Returns
 * true when waiters should be woken (some fence signaled, or the
 * device is NULL / shutting down). */
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;

	if (rdev == NULL) {
		return true;
	}
	if (rdev->shutdown) {
		return true;
	}
	seq = RREG32(rdev->fence_drv.scratch_reg);
	rdev->fence_drv.last_seq = seq;
	/* find the emitted fence whose seq matches the hardware */
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fence previous to this one are considered as signaled */
	if (n) {
		/* walk backwards from the match to the list head, moving
		 * each fence onto the 'signaled' list and marking it */
		i = n;
		do {
			n = i->prev;
			list_del(i);
			list_add_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}
/* kref release callback: unlink the fence from whichever driver list
 * it is on (under the fence lock) and free it.  Invoked by kref_put
 * when the last reference drops (see radeon_fence_unref). */
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}
/* Allocate a new fence (refcount 1, not yet emitted) and put it on the
 * driver's 'created' list.  The caller owns the reference and releases
 * it with radeon_fence_unref.  Returns 0 or -ENOMEM. */
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	/* kmalloc does not zero, so every field is set explicitly */
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
bool radeon_fence_signaled(struct radeon_fence *fence) |
{ |
struct radeon_device *rdev = fence->rdev; |
unsigned long irq_flags; |
bool signaled = false; |
if (rdev->gpu_lockup) { |
return true; |
} |
if (fence == NULL) { |
return true; |
} |
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags); |
signaled = fence->signaled; |
/* if we are shuting down report all fence as signaled */ |
if (fence->rdev->shutdown) { |
signaled = true; |
} |
if (!fence->emited) { |
WARN(1, "Querying an unemited fence : %p !\n", fence); |
signaled = true; |
} |
if (!signaled) { |
radeon_fence_poll_locked(fence->rdev); |
signaled = fence->signaled; |
} |
write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags); |
return signaled; |
} |
/* Block until the fence signals.  Waits in HZ/100 slices (bounded by
 * the fence's own deadline) and re-checks after each slice; if the
 * fence deadline has been exceeded by more than 500ms the GPU is
 * assumed hung, is reset, and the scratch register is forced to this
 * fence's seq so the wait can complete.  Returns 0 on success or
 * -ERESTART if an interruptible wait was broken by a signal. */
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
{
	struct radeon_device *rdev;
	unsigned long cur_jiffies;
	unsigned long timeout;
	bool expired = false;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}

retry:
	/* wait at most HZ/100 per iteration, never past the deadline */
	cur_jiffies = jiffies;
	timeout = HZ / 100;
	if (time_after(fence->timeout, cur_jiffies)) {
		timeout = fence->timeout - cur_jiffies;
	}

	if (interruptible) {
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		if (unlikely(r == -ERESTARTSYS)) {
			return -ERESTART;
		}
	} else {
		r = wait_event_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
	}

	if (unlikely(!radeon_fence_signaled(fence))) {
		if (unlikely(r == 0)) {
			/* the wait slice ran out without the fence signaling */
			expired = true;
		}
		if (unlikely(expired)) {
			timeout = 1;
			if (time_after(cur_jiffies, fence->timeout)) {
				timeout = cur_jiffies - fence->timeout;
			}
			timeout = jiffies_to_msecs(timeout);
			if (timeout > 500) {
				/* more than 500ms past the deadline: assume the
				 * GPU is hung, reset it and force-complete */
				DRM_ERROR("fence(%p:0x%08X) %lums timeout "
					  "going to reset GPU\n",
					  fence, fence->seq, timeout);
				radeon_gpu_reset(rdev);
				WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			}
		}
		goto retry;
	}

	/* signaled, but late: record and report the overrun */
	if (unlikely(expired)) {
		rdev->fence_drv.count_timeout++;
		cur_jiffies = jiffies;
		timeout = 1;
		if (time_after(cur_jiffies, fence->timeout)) {
			timeout = cur_jiffies - fence->timeout;
		}
		timeout = jiffies_to_msecs(timeout);
		DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
			  fence, fence->seq, timeout);
		DRM_ERROR("last signaled fence(0x%08X)\n",
			  rdev->fence_drv.last_seq);
	}
	return 0;
}
/* Wait (uninterruptibly) for the OLDEST emitted fence.  Takes a
 * temporary reference so the fence cannot be freed while waiting
 * outside the lock.  Returns 0 if there is nothing to wait on or the
 * GPU is locked up; otherwise the radeon_fence_wait result. */
int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	/* .next == head of the list == oldest emitted fence */
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
/* Wait (uninterruptibly) for the NEWEST emitted fence, i.e. drain the
 * whole ring.  Mirrors radeon_fence_wait_next except that it grabs the
 * tail (.prev) of the emitted list instead of the head. */
int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	/* .prev == tail of the list == most recently emitted fence */
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
/* Take an additional reference on a fence and return it for chaining.
 * Caller must already hold a valid reference (fence must be non-NULL). */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}
void radeon_fence_unref(struct radeon_fence **fence) |
{ |
struct radeon_fence *tmp = *fence; |
*fence = NULL; |
if (tmp) { |
kref_put(&tmp->kref, &radeon_fence_destroy); |
} |
} |
void radeon_fence_process(struct radeon_device *rdev) |
{ |
unsigned long irq_flags; |
bool wake; |
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); |
wake = radeon_fence_poll_locked(rdev); |
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
if (wake) { |
wake_up_all(&rdev->fence_drv.queue); |
} |
} |
int radeon_fence_driver_init(struct radeon_device *rdev) |
{ |
unsigned long irq_flags; |
int r; |
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); |
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg); |
if (r) { |
DRM_ERROR("Fence failed to get a scratch register."); |
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
return r; |
} |
WREG32(rdev->fence_drv.scratch_reg, 0); |
atomic_set(&rdev->fence_drv.seq, 0); |
INIT_LIST_HEAD(&rdev->fence_drv.created); |
INIT_LIST_HEAD(&rdev->fence_drv.emited); |
INIT_LIST_HEAD(&rdev->fence_drv.signaled); |
rdev->fence_drv.count_timeout = 0; |
init_waitqueue_head(&rdev->fence_drv.queue); |
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
if (radeon_debugfs_fence_init(rdev)) { |
DRM_ERROR("Failed to register debugfs file for fence !\n"); |
} |
return 0; |
} |
/* Tear down the fence driver: kick any remaining waiters, then return
 * the scratch register under the fence lock. */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	DRM_INFO("radeon: fence finalized\n");
}
/* |
* Fence debugfs |
*/ |
#if defined(CONFIG_DEBUG_FS)
/* debugfs show callback: print the last signaled seq (from the scratch
 * register) and, if any fences are outstanding, the newest emitted one.
 * NOTE(review): reads the emited list without taking fence_drv.lock —
 * debug-only, but worth confirming that is intentional. */
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		/* tail of the list == most recently emitted fence */
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emited fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

/* single-entry table registered with debugfs */
static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif
/* Register the fence debugfs file; a no-op returning 0 when debugfs
 * support is compiled out. */
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
/drivers/video/drm/radeon/radeon_gart.c |
---|
25,7 → 25,7 |
* Alex Deucher |
* Jerome Glisse |
*/ |
//#include "drmP.h" |
#include "drmP.h" |
#include "radeon_drm.h" |
#include "radeon.h" |
#include "radeon_reg.h" |
80,6 → 80,7 |
uint32_t gpu_addr; |
int r; |
dbgprintf("%s\n",__FUNCTION__); |
if (rdev->gart.table.vram.robj == NULL) { |
r = radeon_object_create(rdev, NULL, |
/drivers/video/drm/radeon/radeon_i2c.c |
---|
179,7 → 179,7 |
i2c->algo.timeout = 2; |
i2c->algo.data = i2c; |
i2c->rec = *rec; |
i2c_set_adapdata(&i2c->adapter, i2c); |
// i2c_set_adapdata(&i2c->adapter, i2c); |
ret = i2c_bit_add_bus(&i2c->adapter); |
if (ret) { |
199,7 → 199,7 |
if (!i2c) |
return; |
i2c_del_adapter(&i2c->adapter); |
// i2c_del_adapter(&i2c->adapter); |
kfree(i2c); |
} |
/drivers/video/drm/radeon/radeon_mode.h |
---|
30,14 → 30,12 |
#ifndef RADEON_MODE_H |
#define RADEON_MODE_H |
#include "drm_mode.h" |
#include "drm_crtc.h" |
#include <drm_crtc.h> |
#include <drm_mode.h> |
#include <drm_edid.h> |
#include <linux/i2c.h> |
#include <linux/i2c-algo-bit.h> |
//#include <linux/i2c.h> |
//#include <linux/i2c-id.h> |
//#include <linux/i2c-algo-bit.h> |
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) |
#define to_radeon_connector(x) container_of(x, struct radeon_connector, base) |
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) |
/drivers/video/drm/radeon/radeon_object.c |
---|
29,8 → 29,8 |
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> |
* Dave Airlie |
*/ |
//#include <linux/list.h> |
//#include <drm/drmP.h> |
#include <list.h> |
#include <drmP.h> |
#include "radeon_drm.h" |
#include "radeon.h" |
359,6 → 359,8 |
{ |
int r = 0; |
dbgprintf("%s\n",__FUNCTION__); |
r = drm_mm_init(&mm_vram, 0x800000 >> PAGE_SHIFT, |
((rdev->mc.aper_size - 0x800000) >> PAGE_SHIFT)); |
if (r) { |
/drivers/video/drm/radeon/radeon_ring.c |
---|
26,7 → 26,7 |
* Jerome Glisse |
*/ |
//#include <linux/seq_file.h> |
//#include "drmP.h" |
#include "drmP.h" |
#include "radeon_drm.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
99,7 → 99,6 |
return r; |
} |
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) |
{ |
struct radeon_ib *tmp = *ib; |
/drivers/video/drm/radeon/rv515.c |
---|
26,7 → 26,7 |
* Jerome Glisse |
*/ |
//#include <linux/seq_file.h> |
//#include "drmP.h" |
#include "drmP.h" |
#include "radeon_reg.h" |
#include "radeon.h" |