/drivers/video/drm/drm_crtc.c |
---|
259,8 → 259,6 |
EXPORT_SYMBOL(drm_mode_object_find); |
#if 0 |
/** |
* drm_crtc_from_fb - find the CRTC structure associated with an fb |
* @dev: DRM device |
346,8 → 344,6 |
EXPORT_SYMBOL(drm_framebuffer_cleanup); |
#endif |
/** |
* drm_crtc_init - Initialise a new CRTC object |
* @dev: DRM device |
/drivers/video/drm/drm_crtc_helper.c |
---|
282,6 → 282,8 |
{ |
struct drm_display_mode *mode; |
ENTRY(); |
list_for_each_entry(mode, &connector->modes, head) { |
if (drm_mode_width(mode) > width || |
drm_mode_height(mode) > height) |
312,7 → 314,7 |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
enabled[i] = drm_connector_enabled(connector, true); |
DRM_DEBUG("connector %d enabled? %s\n", connector->base.id, |
DRM_DEBUG("connector %d enabled ? %s\n", connector->base.id, |
enabled[i] ? "yes" : "no"); |
any_enabled |= enabled[i]; |
i++; |
380,6 → 382,8 |
c++; |
} |
dbgprintf("n= %d\n", n); |
best_crtcs[n] = NULL; |
best_crtc = NULL; |
best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height); |
391,6 → 395,8 |
if (!crtcs) |
return best_score; |
dbgprintf("crtcs = %x\n", crtcs); |
my_score = 1; |
if (connector->status == connector_status_connected) |
my_score++; |
399,6 → 405,9 |
connector_funcs = connector->helper_private; |
encoder = connector_funcs->best_encoder(connector); |
dbgprintf("encoder = %x\n", encoder); |
if (!encoder) |
goto out; |
439,6 → 448,11 |
} |
out: |
kfree(crtcs); |
dbgprintf("best_score= %x\n", best_score); |
LEAVE(); |
return best_score; |
} |
454,8 → 468,8 |
DRM_DEBUG("\n"); |
width = dev->mode_config.max_width; |
height = dev->mode_config.max_height; |
width = 1280; //dev->mode_config.max_width; |
height = 1024; //dev->mode_config.max_height; |
/* clean out all the encoder/crtc combos */ |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
479,8 → 493,6 |
drm_pick_crtcs(dev, crtcs, modes, 0, width, height); |
dbgprintf("done\n"); |
i = 0; |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
struct drm_display_mode *mode = modes[i]; |
495,6 → 507,8 |
DRM_DEBUG("desired mode %s set on crtc %d\n", |
mode->name, crtc->base.id); |
crtc->desired_mode = mode; |
// crtc->mode = *mode; |
crtc->enabled = true; |
connector->encoder->crtc = crtc; |
} else |
connector->encoder->crtc = NULL; |
589,6 → 603,8 |
struct drm_encoder *encoder; |
bool ret = true; |
ENTRY(); |
adjusted_mode = drm_mode_duplicate(dev, mode); |
crtc->enabled = drm_helper_crtc_in_use(crtc); |
680,7 → 696,7 |
crtc->x = saved_x; |
crtc->y = saved_y; |
} |
LEAVE(); |
return ret; |
} |
EXPORT_SYMBOL(drm_crtc_helper_set_mode); |
913,7 → 929,7 |
drm_setup_crtcs(dev); |
/* alert the driver fb layer */ |
// dev->mode_config.funcs->fb_changed(dev); |
dev->mode_config.funcs->fb_changed(dev); |
/* FIXME: send hotplug event */ |
return true; |
957,9 → 973,11 |
drm_setup_crtcs(dev); |
/* alert the driver fb layer */ |
// dev->mode_config.funcs->fb_changed(dev); |
radeonfb_create(dev->dev_private, 1280, 1024, 1280, 1024, NULL); |
// /* alert the driver fb layer */ |
dev->mode_config.funcs->fb_changed(dev); |
LEAVE(); |
return 0; |
1082,22 → 1100,50 |
} |
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); |
/*
 * sysSetScreen - report a new screen resolution to the host OS.
 *
 * Thin wrapper around the imported SetScreen system routine: width is
 * passed in EAX and height in EDX, each decremented by one before the
 * call (presumably the interface takes the maximum pixel coordinate
 * rather than the pixel count -- TODO confirm against the SetScreen ABI).
 * "memory" and "cc" are clobbered because the callee is an arbitrary
 * system entry point.
 */
void sysSetScreen(int width, int height)
{
  asm __volatile__
  (
    "decl %%eax \n\t"             /* EAX = width  - 1 */
    "dec %%edx \n\t"              /* EDX = height - 1 */
    "call *__imp__SetScreen"      /* indirect call through the import table */
    :
    :"a" (width),"d"(height)
    :"memory","cc"
  );
}
int drm_helper_resume_force_mode(struct drm_device *dev) |
{ |
struct drm_crtc *crtc; |
struct drm_framebuffer *fb; |
int ret; |
ENTRY(); |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
if (!crtc->enabled) |
continue; |
dbgprintf("mode %x x %x y %x fb %x\n", |
crtc->x, crtc->y, crtc->fb, crtc->mode); |
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, |
fb = list_first_entry(&dev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head); |
crtc->fb = fb; |
ret = drm_crtc_helper_set_mode(crtc, crtc->desired_mode, |
crtc->x, crtc->y, crtc->fb); |
if (ret == false) |
DRM_ERROR("failed to set mode on crtc %p\n", crtc); |
sysSetScreen(1280,1024); |
} |
LEAVE(); |
return 0; |
} |
EXPORT_SYMBOL(drm_helper_resume_force_mode); |
/drivers/video/drm/drm_mm.c |
---|
41,7 → 41,7 |
* Thomas Hellström <thomas-at-tungstengraphics-dot-com> |
*/ |
//#include "drmP.h" |
#include "drmP.h" |
#include "drm_mm.h" |
//#include <linux/slab.h> |
120,7 → 120,7 |
spin_unlock(&mm->unused_lock); |
return 0; |
} |
//EXPORT_SYMBOL(drm_mm_pre_get); |
EXPORT_SYMBOL(drm_mm_pre_get); |
static int drm_mm_create_tail_node(struct drm_mm *mm, |
unsigned long start, |
215,7 → 215,7 |
return node; |
} |
//EXPORT_SYMBOL(drm_mm_get_block_generic); |
EXPORT_SYMBOL(drm_mm_get_block_generic); |
/* |
* Put a block. Merge with the previous and / or next block if they are free. |
275,7 → 275,7 |
} |
} |
//EXPORT_SYMBOL(drm_mm_put_block); |
EXPORT_SYMBOL(drm_mm_put_block); |
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, |
unsigned long size, |
316,7 → 316,7 |
return best; |
} |
//EXPORT_SYMBOL(drm_mm_search_free); |
EXPORT_SYMBOL(drm_mm_search_free); |
int drm_mm_clean(struct drm_mm * mm) |
{ |
324,7 → 324,7 |
return (head->next->next == head); |
} |
//EXPORT_SYMBOL(drm_mm_clean); |
EXPORT_SYMBOL(drm_mm_clean); |
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) |
{ |
336,7 → 336,7 |
return drm_mm_create_tail_node(mm, start, size, 0); |
} |
//EXPORT_SYMBOL(drm_mm_init); |
EXPORT_SYMBOL(drm_mm_init); |
void drm_mm_takedown(struct drm_mm * mm) |
{ |
366,4 → 366,4 |
BUG_ON(mm->num_unused != 0); |
} |
//EXPORT_SYMBOL(drm_mm_takedown); |
EXPORT_SYMBOL(drm_mm_takedown); |
/drivers/video/drm/idr.c |
---|
240,7 → 240,7 |
{ |
while (idp->id_free_cnt < IDR_FREE_MAX) { |
struct idr_layer *new; |
new = kzalloc(sizeof(new), gfp_mask); |
new = kzalloc(sizeof(struct idr_layer), gfp_mask); |
if (new == NULL) |
return (0); |
move_to_free_list(idp, new); |
/drivers/video/drm/include/drmP.h |
---|
70,6 → 70,61 |
#define DRM_DEBUG(fmt, arg...) \ |
printk("[" DRM_NAME ":%s] " fmt , __func__ , ##arg) |
/**
 * This structure defines the DRM GEM buffer object, which will be used by
 * the DRM for its buffer objects.  Fields still commented out are not yet
 * supported by this port.
 */
struct drm_gem_object {
    /** Reference count of this object */
//    struct kref refcount;

    /** Handle count of this object. Each handle also holds a reference */
//    struct kref handlecount;

    /** Related drm device */
    struct drm_device *dev;

    /** File representing the shmem storage */
//    struct file *filp;

    /* Mapping info for this object */
//    struct drm_map_list map_list;

    /**
     * Size of the object, in bytes.  Immutable over the object's
     * lifetime.
     */
    size_t size;

    /**
     * Global name for this object, starts at 1. 0 means unnamed.
     * Access is covered by the object_name_lock in the related drm_device
     */
    int name;

    /**
     * Memory domains. These monitor which caches contain read/write data
     * related to the object. When transitioning from one set of domains
     * to another, the driver is called to ensure that caches are suitably
     * flushed and invalidated
     */
    uint32_t read_domains;
    uint32_t write_domain;

    /**
     * While validating an exec operation, the
     * new read/write domain values are computed here.
     * They will be transferred to the above values
     * at the point that any cache flushing occurs
     */
    uint32_t pending_read_domains;
    uint32_t pending_write_domain;

    /** Driver-private per-object state (e.g. the radeon object). */
    void *driver_private;
};
#if 0 |
/***********************************************************************/ |
/drivers/video/drm/include/types.h |
---|
147,56 → 147,6 |
return ret; |
} |
/*
 * GEM buffer object bookkeeping.
 * NOTE(review): a near-identical definition also appears in drmP.h in this
 * tree -- confirm only one of the two copies is actually compiled.
 */
struct drm_gem_object {
    /** Reference count of this object */
//    struct kref refcount;
    /** Handle count of this object. Each handle also holds a reference */
//    struct kref handlecount;
    /** Related drm device */
//    struct drm_device *dev;
    /** File representing the shmem storage */
//    struct file *filp;
    /* Mapping info for this object */
//    struct drm_map_list map_list;
    /**
     * Size of the object, in bytes.  Immutable over the object's
     * lifetime.
     */
    size_t size;
    /**
     * Global name for this object, starts at 1. 0 means unnamed.
     * Access is covered by the object_name_lock in the related drm_device
     */
    int name;
    /**
     * Memory domains. These monitor which caches contain read/write data
     * related to the object. When transitioning from one set of domains
     * to another, the driver is called to ensure that caches are suitably
     * flushed and invalidated
     */
    uint32_t read_domains;
    uint32_t write_domain;
    /**
     * While validating an exec operation, the
     * new read/write domain values are computed here.
     * They will be transferred to the above values
     * at the point that any cache flushing occurs
     */
    uint32_t pending_read_domains;
    uint32_t pending_write_domain;
    /** Driver-private per-object state. */
    void *driver_private;
};
struct drm_file; |
#define offsetof(TYPE,MEMBER) __builtin_offsetof(TYPE,MEMBER) |
277,4 → 227,8 |
#define ENTRY() dbgprintf("entry %s\n",__FUNCTION__) |
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__) |
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) |
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) |
#endif //__TYPES_H__ |
/drivers/video/drm/radeon/atom.h |
---|
26,7 → 26,7 |
#define ATOM_H |
#include <types.h> |
//#include "drmP.h" |
#include "drmP.h" |
#define ATOM_BIOS_MAGIC 0xAA55 |
#define ATOM_ATI_MAGIC_PTR 0x30 |
/drivers/video/drm/radeon/atombios_crtc.c |
---|
311,7 → 311,6 |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
struct drm_framebuffer *old_fb) |
{ |
324,6 → 323,8 |
uint64_t fb_location; |
uint32_t fb_format, fb_pitch_pixels; |
ENTRY(); |
if (!crtc->fb) |
return -EINVAL; |
332,10 → 333,15 |
obj = radeon_fb->obj; |
obj_priv = obj->driver_private; |
// if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) { |
// return -EINVAL; |
// } |
//if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) { |
// return -EINVAL; |
//} |
fb_location = rdev->mc.vram_location; |
dbgprintf("fb_location %x\n", fb_location); |
dbgprintf("bpp %x\n", crtc->fb->bits_per_pixel); |
switch (crtc->fb->bits_per_pixel) { |
case 15: |
fb_format = |
400,10 → 406,10 |
radeon_fb = to_radeon_framebuffer(old_fb); |
// radeon_gem_object_unpin(radeon_fb->obj); |
} |
LEAVE(); |
return 0; |
} |
int atombios_crtc_mode_set(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, |
415,6 → 421,8 |
struct drm_encoder *encoder; |
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing; |
ENTRY(); |
/* TODO color tiling */ |
memset(&crtc_timing, 0, sizeof(crtc_timing)); |
511,6 → 519,8 |
radeon_crtc_set_base(crtc, x, y, old_fb); |
radeon_legacy_atom_set_surface(crtc); |
} |
LEAVE(); |
return 0; |
} |
/drivers/video/drm/radeon/makefile |
---|
4,6 → 4,7 |
CFLAGS = -c -O2 -fomit-frame-pointer -fno-builtin-printf |
LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0 --file-alignment 512 --section-alignment 4096 |
DRM_TOPDIR = $(CURDIR)/.. |
DRM_INCLUDES = $(DRM_TOPDIR)/include |
24,13 → 25,13 |
$(DRM_INCLUDES)/drm_crtc.h \ |
$(DRM_INCLUDES)/drm_mode.h \ |
$(DRM_INCLUDES)/drm_mm.h \ |
atom.h \ |
radeon.h \ |
atom.h \ |
radeon.h \ |
radeon_asic.h |
NAME_SRC= \ |
pci.c \ |
$(DRM_TOPDIR)/drm_mm.c \ |
NAME_SRC= \ |
pci.c \ |
$(DRM_TOPDIR)/drm_mm.c \ |
$(DRM_TOPDIR)/drm_edid.c \ |
$(DRM_TOPDIR)/drm_modes.c \ |
$(DRM_TOPDIR)/drm_crtc.c \ |
38,26 → 39,28 |
$(DRM_TOPDIR)/i2c/i2c-core.c \ |
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \ |
$(DRM_TOPDIR)/idr.c \ |
radeon_device.c \ |
radeon_clocks.c \ |
radeon_gem.c \ |
radeon_device.c \ |
radeon_clocks.c \ |
radeon_i2c.c \ |
atom.c \ |
radeon_atombios.c \ |
radeon_atombios.c \ |
atombios_crtc.c \ |
radeon_encoders.c \ |
radeon_connectors.c \ |
radeon_bios.c \ |
radeon_bios.c \ |
radeon_combios.c \ |
radeon_legacy_crtc.c \ |
radeon_legacy_encoders.c \ |
radeon_display.c \ |
radeon_object.c \ |
radeon_gart.c \ |
radeon_ring.c \ |
r100.c \ |
r300.c \ |
rv515.c \ |
r520.c |
radeon_object.c \ |
radeon_gart.c \ |
radeon_ring.c \ |
r100.c \ |
r300.c \ |
rv515.c \ |
r520.c \ |
radeon_fb.c |
SRC_DEP:= |
/drivers/video/drm/radeon/r300.c |
---|
150,7 → 150,7 |
if (i < 0 || i > rdev->gart.num_gpu_pages) { |
return -EINVAL; |
} |
addr = (((u32_t)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC; |
addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC; |
writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4)); |
return 0; |
} |
/drivers/video/drm/radeon/radeon.h |
---|
110,7 → 110,6 |
CHIP_RV770, |
CHIP_RV730, |
CHIP_RV710, |
CHIP_RV740, |
CHIP_LAST, |
}; |
231,6 → 230,16 |
struct list_head objects; |
}; |
int radeon_gem_init(struct radeon_device *rdev); |
void radeon_gem_fini(struct radeon_device *rdev); |
int radeon_gem_object_create(struct radeon_device *rdev, int size, |
int alignment, int initial_domain, |
bool discardable, bool kernel, |
bool interruptible, |
struct drm_gem_object **obj); |
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
uint64_t *gpu_addr); |
void radeon_gem_object_unpin(struct drm_gem_object *obj); |
/* |
519,7 → 528,7 |
uint16_t bios_header_start; |
// struct radeon_object *stollen_vga_memory; |
// struct fb_info *fbdev_info; |
struct fb_info *fbdev_info; |
struct radeon_object *fbdev_robj; |
struct radeon_framebuffer *fbdev_rfb; |
549,7 → 558,7 |
struct radeon_ib_pool ib_pool; |
// struct radeon_irq irq; |
struct radeon_asic *asic; |
// struct radeon_gem gem; |
struct radeon_gem gem; |
// struct mutex cs_mutex; |
struct radeon_wb wb; |
bool gpu_lockup; |
898,11 → 907,6 |
{0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ |
/drivers/video/drm/radeon/radeon_connectors.c |
---|
24,8 → 24,7 |
* Alex Deucher |
*/ |
#include "drmP.h" |
//#include "drm_edid.h" |
#include "drm_crtc.h" |
#include "drm_edid.h" |
#include "drm_crtc_helper.h" |
#include "radeon_drm.h" |
#include "radeon.h" |
84,6 → 83,8 |
struct drm_mode_object *obj; |
struct drm_encoder *encoder; |
ENTRY(); |
/* pick the encoder ids */ |
if (enc_id) { |
obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); |
323,6 → 324,9 |
struct drm_mode_object *obj; |
struct drm_encoder *encoder; |
int i; |
ENTRY(); |
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
if (connector->encoder_ids[i] == 0) |
break; |
347,6 → 351,9 |
/* then check using digital */ |
/* pick the first one */ |
dbgprintf("enc_id = %x\n", enc_id); |
if (enc_id) { |
obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); |
if (!obj) |
/drivers/video/drm/radeon/radeon_device.c |
---|
28,7 → 28,7 |
//#include <linux/console.h> |
#include <drmP.h> |
//#include <drm/drm_crtc_helper.h> |
#include <drm_crtc_helper.h> |
#include "radeon_drm.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
124,7 → 124,7 |
*/ |
/* FGLRX seems to setup like this, VRAM a 0, then GART. |
*/ |
/* |
/* |
* Note: from R6xx the address space is 40bits but here we only |
* use 32bits (still have to see a card which would exhaust 4G |
* address space). |
285,7 → 285,6 |
} |
/* |
* ASIC |
*/ |
474,7 → 473,7 |
struct pci_dev *pdev, |
uint32_t flags) |
{ |
int r, ret = -1; |
int r, ret; |
dbgprintf("%s\n",__FUNCTION__); |
495,7 → 494,6 |
// mutex_init(&rdev->cp.mutex); |
// rwlock_init(&rdev->fence_drv.lock); |
if (radeon_agpmode == -1) { |
rdev->flags &= ~RADEON_IS_AGP; |
if (rdev->family > CHIP_RV515 || |
580,7 → 578,6 |
// radeon_combios_asic_init(rdev->ddev); |
} |
} |
/* Get vram informations */ |
radeon_vram_info(rdev); |
/* Device is severly broken if aper size > vram size. |
608,9 → 605,7 |
r = radeon_mc_init(rdev); |
if (r) { |
return r; |
}; |
} |
/* Fence driver */ |
// r = radeon_fence_driver_init(rdev); |
// if (r) { |
628,9 → 623,9 |
/* Initialize GART (initialize after TTM so we can allocate |
* memory through TTM but finalize after TTM) */ |
r = radeon_gart_enable(rdev); |
// if (!r) { |
// r = radeon_gem_init(rdev); |
// } |
if (!r) { |
r = radeon_gem_init(rdev); |
} |
/* 1M ring buffer */ |
if (!r) { |
672,11 → 667,12 |
if (!ret) { |
DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); |
} |
// if (radeon_benchmarking) { |
if (radeon_benchmarking) { |
// radeon_benchmark(rdev); |
// } |
} |
return ret; |
return -1; |
// return -1; |
} |
static struct pci_device_id pciidlist[] = { |
873,6 → 869,9 |
// driver->name, driver->major, driver->minor, driver->patchlevel, |
// driver->date, pci_name(pdev), dev->primary->index); |
drm_helper_resume_force_mode(dev); |
return 0; |
err_g4: |
/drivers/video/drm/radeon/radeon_display.c |
---|
169,7 → 169,7 |
// .cursor_set = radeon_crtc_cursor_set, |
// .cursor_move = radeon_crtc_cursor_move, |
.gamma_set = radeon_crtc_gamma_set, |
// .set_config = drm_crtc_helper_set_config, |
.set_config = drm_crtc_helper_set_config, |
.destroy = radeon_crtc_destroy, |
}; |
556,7 → 556,6 |
*post_div_p = best_post_div; |
} |
#if 0 |
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) |
{ |
591,7 → 590,6 |
.create_handle = radeon_user_framebuffer_create_handle, |
}; |
#endif |
struct drm_framebuffer * |
radeon_framebuffer_create(struct drm_device *dev, |
604,8 → 602,8 |
if (radeon_fb == NULL) { |
return NULL; |
} |
// drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs); |
// drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd); |
drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs); |
drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd); |
radeon_fb->obj = obj; |
return &radeon_fb->base; |
} |
627,10 → 625,9 |
static const struct drm_mode_config_funcs radeon_mode_funcs = { |
// .fb_create = radeon_user_framebuffer_create, |
// .fb_changed = radeonfb_probe, |
.fb_changed = radeonfb_probe, |
}; |
int radeon_modeset_init(struct radeon_device *rdev) |
{ |
/drivers/video/drm/radeon/radeon_drv.c |
---|
0,0 → 1,98 |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "syscall.h" |
int radeon_dynclks = -1; |
/* PCI device IDs this driver can bind to, expanded from the
 * radeon_PCI_IDS table macro. */
static struct pci_device_id pciidlist[] = {
    radeon_PCI_IDS
};
/*
 * KMS driver vtable: wires the generic DRM core entry points to the
 * radeon KMS implementations (radeon_*_kms).
 * NOTE(review): several referenced hooks may be stubs in this port --
 * confirm each symbol exists before relying on its feature flag.
 */
static struct drm_driver kms_driver = {
    /* Capabilities advertised to the DRM core. */
    .driver_features =
        DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
        DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
    .dev_priv_size = 0,
    /* Device lifecycle. */
    .load = radeon_driver_load_kms,
    .firstopen = radeon_driver_firstopen_kms,
    .open = radeon_driver_open_kms,
    .preclose = radeon_driver_preclose_kms,
    .postclose = radeon_driver_postclose_kms,
    .lastclose = radeon_driver_lastclose_kms,
    .unload = radeon_driver_unload_kms,
    .suspend = radeon_suspend_kms,
    .resume = radeon_resume_kms,
    /* Vblank handling. */
    .get_vblank_counter = radeon_get_vblank_counter_kms,
    .enable_vblank = radeon_enable_vblank_kms,
    .disable_vblank = radeon_disable_vblank_kms,
    .master_create = radeon_master_create_kms,
    .master_destroy = radeon_master_destroy_kms,
    /* Interrupt handling. */
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
    .irq_handler = radeon_driver_irq_handler_kms,
    .reclaim_buffers = drm_core_reclaim_buffers,
    .get_map_ofs = drm_core_get_map_ofs,
    .get_reg_ofs = drm_core_get_reg_ofs,
    .ioctls = radeon_ioctls_kms,
    /* GEM object lifecycle. */
    .gem_init_object = radeon_gem_object_init,
    .gem_free_object = radeon_gem_object_free,
    .dma_ioctl = radeon_dma_ioctl_kms,
    /* File operations forwarded to the DRM core. */
    .fops = {
         .owner = THIS_MODULE,
         .open = drm_open,
         .release = drm_release,
         .ioctl = drm_ioctl,
         .mmap = radeon_mmap,
         .poll = drm_poll,
         .fasync = drm_fasync,
    },
    .pci_driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
        .probe = radeon_pci_probe,
        .remove = radeon_pci_remove,
        .suspend = radeon_pci_suspend,
        .resume = radeon_pci_resume,
    },
    .name = DRIVER_NAME,
    .desc = DRIVER_DESC,
    .date = DRIVER_DATE,
    .major = KMS_DRIVER_MAJOR,
    .minor = KMS_DRIVER_MINOR,
    .patchlevel = KMS_DRIVER_PATCHLEVEL,
};
/*
 * radeon_init - module entry point: force KMS on, register the driver
 * with the DRM core, and hand over pciidlist-based probing.
 */
static int __init radeon_init(void)
{
    struct drm_driver *drv = &kms_driver;

    radeon_modeset = 1;
    drv->driver_features |= DRIVER_MODESET;
    drv->num_ioctls = radeon_max_kms_ioctl;
    driver = drv;
    return drm_init(drv);
}
/*
 * Local mirror of the Linux struct pci_driver layout used by this port.
 * NOTE(review): this definition appears after kms_driver already
 * initialized a .pci_driver member above -- confirm the definition the
 * compiler actually sees for that member matches this layout.
 */
struct pci_driver
{
    struct list_head node;
    char *name;
    const struct pci_device_id *id_table;   /* must be non-NULL for probe to be called */
    int  (*probe)  (struct pci_dev *dev, const struct pci_device_id *id);   /* New device inserted */
    void (*remove) (struct pci_dev *dev);   /* Device removed (NULL if not a hot-plug capable driver) */
    int  (*suspend) (struct pci_dev *dev, pm_message_t state);  /* Device suspended */
    int  (*suspend_late) (struct pci_dev *dev, pm_message_t state);
    int  (*resume_early) (struct pci_dev *dev);
    int  (*resume) (struct pci_dev *dev);                   /* Device woken up */
    void (*shutdown) (struct pci_dev *dev);
    struct pci_error_handlers *err_handler;
    struct device_driver    driver;
    struct pci_dynids dynids;
};
/drivers/video/drm/radeon/radeon_encoders.c |
---|
24,11 → 24,6 |
* Alex Deucher |
*/ |
#include "drmP.h" |
//#include <types.h> |
//#include <list.h> |
//#include <syscall.h> |
#include "drm_crtc.h" |
#include "drm_crtc_helper.h" |
#include "radeon_drm.h" |
#include "radeon.h" |
1711,4 → 1706,3 |
break; |
} |
} |
/drivers/video/drm/radeon/radeon_fb.c |
---|
0,0 → 1,1175 |
/* |
* Copyright © 2007 David Airlie |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* David Airlie |
*/ |
/* |
* Modularization |
*/ |
//#include <linux/module.h> |
//#include <linux/kernel.h> |
//#include <linux/errno.h> |
//#include <linux/string.h> |
//#include <linux/mm.h> |
//#include <linux/tty.h> |
//#include <linux/slab.h> |
//#include <linux/delay.h> |
//#include <linux/fb.h> |
//#include <linux/init.h> |
#include "drmP.h" |
#include "drm.h" |
#include "drm_crtc.h" |
#include "drm_crtc_helper.h" |
#include "radeon_drm.h" |
#include "radeon.h" |
#include <drm_mm.h> |
#include "radeon_object.h" |
#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ |
#define FB_VISUAL_TRUECOLOR 2 /* True color */ |
/* Mirror of the Linux <linux/fb.h> fixed screen info: properties of the
 * framebuffer that do not change with the video mode. */
struct fb_fix_screeninfo {
    char id[16];            /* identification string eg "TT Builtin" */
    unsigned long smem_start;   /* Start of frame buffer mem */
                    /* (physical address) */
    __u32 smem_len;         /* Length of frame buffer mem */
    __u32 type;         /* see FB_TYPE_*        */
    __u32 type_aux;         /* Interleave for interleaved Planes */
    __u32 visual;           /* see FB_VISUAL_*      */
    __u16 xpanstep;         /* zero if no hardware panning  */
    __u16 ypanstep;         /* zero if no hardware panning  */
    __u16 ywrapstep;        /* zero if no hardware ywrap    */
    __u32 line_length;      /* length of a line in bytes    */
    unsigned long mmio_start;   /* Start of Memory Mapped I/O   */
                    /* (physical address) */
    __u32 mmio_len;         /* Length of Memory Mapped I/O  */
    __u32 accel;            /* Indicate to driver which */
                    /*  specific chip/card we have  */
    __u16 reserved[3];      /* Reserved for future compatibility */
};
/* One color component's position within a true-color pixel. */
struct fb_bitfield {
    __u32 offset;           /* beginning of bitfield    */
    __u32 length;           /* length of bitfield       */
    __u32 msb_right;        /* != 0 : Most significant bit is */
                    /* right */
};
/* Mirror of the Linux <linux/fb.h> variable screen info: the per-mode
 * geometry, pixel format, and timing of the framebuffer. */
struct fb_var_screeninfo {
    __u32 xres;         /* visible resolution       */
    __u32 yres;
    __u32 xres_virtual;     /* virtual resolution       */
    __u32 yres_virtual;
    __u32 xoffset;          /* offset from virtual to visible */
    __u32 yoffset;          /* resolution           */

    __u32 bits_per_pixel;       /* guess what           */
    __u32 grayscale;        /* != 0 Graylevels instead of colors */

    struct fb_bitfield red;     /* bitfield in fb mem if true color, */
    struct fb_bitfield green;   /* else only length is significant */
    struct fb_bitfield blue;
    struct fb_bitfield transp;  /* transparency         */

    __u32 nonstd;           /* != 0 Non standard pixel format */

    __u32 activate;         /* see FB_ACTIVATE_*        */

    __u32 height;           /* height of picture in mm    */
    __u32 width;            /* width of picture in mm     */

    __u32 accel_flags;      /* (OBSOLETE) see fb_info.flags */

    /* Timing: All values in pixclocks, except pixclock (of course) */
    __u32 pixclock;         /* pixel clock in ps (pico seconds) */
    __u32 left_margin;      /* time from sync to picture    */
    __u32 right_margin;     /* time from picture to sync    */
    __u32 upper_margin;     /* time from sync to picture    */
    __u32 lower_margin;
    __u32 hsync_len;        /* length of horizontal sync    */
    __u32 vsync_len;        /* length of vertical sync  */
    __u32 sync;         /* see FB_SYNC_*        */
    __u32 vmode;            /* see FB_VMODE_*       */
    __u32 rotate;           /* angle we rotate counter clockwise */
    __u32 reserved[5];      /* Reserved for future compatibility */
};
/* CIE chromaticity coordinates of the display primaries and white point. */
struct fb_chroma {
    __u32 redx;         /* in fraction of 1024 */
    __u32 greenx;
    __u32 bluex;
    __u32 whitex;
    __u32 redy;
    __u32 greeny;
    __u32 bluey;
    __u32 whitey;
};
/* One video mode entry (geometry + timing) in fbdev format. */
struct fb_videomode {
    const char *name;       /* optional */
    u32 refresh;            /* optional */
    u32 xres;
    u32 yres;
    u32 pixclock;
    u32 left_margin;
    u32 right_margin;
    u32 upper_margin;
    u32 lower_margin;
    u32 hsync_len;
    u32 vsync_len;
    u32 sync;
    u32 vmode;
    u32 flag;
};
/* Monitor capabilities, typically parsed from EDID. */
struct fb_monspecs {
    struct fb_chroma chroma;
    struct fb_videomode *modedb;    /* mode database */
    __u8  manufacturer[4];      /* Manufacturer */
    __u8  monitor[14];      /* Monitor String */
    __u8  serial_no[14];        /* Serial Number */
    __u8  ascii[14];        /* ? */
    __u32 modedb_len;       /* mode database length */
    __u32 model;            /* Monitor Model */
    __u32 serial;           /* Serial Number - Integer */
    __u32 year;         /* Year manufactured */
    __u32 week;         /* Week Manufactured */
    __u32 hfmin;            /* hfreq lower limit (Hz) */
    __u32 hfmax;            /* hfreq upper limit (Hz) */
    __u32 dclkmin;          /* pixelclock lower limit (Hz) */
    __u32 dclkmax;          /* pixelclock upper limit (Hz) */
    __u16 input;            /* display type - see FB_DISP_* */
    __u16 dpms;         /* DPMS support - see FB_DPMS_ */
    __u16 signal;           /* Signal Type - see FB_SIGNAL_* */
    __u16 vfmin;            /* vfreq lower limit (Hz) */
    __u16 vfmax;            /* vfreq upper limit (Hz) */
    __u16 gamma;            /* Gamma - in fractions of 100 */
    __u16 gtf   : 1;        /* supports GTF */
    __u16 misc;         /* Misc flags - see FB_MISC_* */
    __u8  version;          /* EDID version... */
    __u8  revision;         /* ...and revision */
    __u8  max_x;            /* Maximum horizontal size (cm) */
    __u8  max_y;            /* Maximum vertical size (cm) */
};
/* Stripped-down mirror of the Linux struct fb_info for this port: the
 * per-framebuffer device state of the fbdev emulation.  Commented-out
 * members are not supported here. */
struct fb_info {
    int node;
    int flags;
//    struct mutex lock;        /* Lock for open/release/ioctl funcs */
//    struct mutex mm_lock;     /* Lock for fb_mmap and smem_* fields */
    struct fb_var_screeninfo var;   /* Current var */
    struct fb_fix_screeninfo fix;   /* Current fix */
    struct fb_monspecs monspecs;    /* Current Monitor specs */
//    struct work_struct queue; /* Framebuffer event queue */
//    struct fb_pixmap pixmap;  /* Image hardware mapper */
//    struct fb_pixmap sprite;  /* Cursor hardware mapper */
//    struct fb_cmap cmap;      /* Current cmap */
    struct list_head modelist;      /* mode list */
    struct fb_videomode *mode;  /* current mode */

#ifdef CONFIG_FB_BACKLIGHT
    /* assigned backlight device */
    /* set before framebuffer registration,
       remove after unregister */
    struct backlight_device *bl_dev;

    /* Backlight level curve */
    struct mutex bl_curve_mutex;
    u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
#ifdef CONFIG_FB_DEFERRED_IO
    struct delayed_work deferred_work;
    struct fb_deferred_io *fbdefio;
#endif

    struct fb_ops *fbops;
//    struct device *device;        /* This is the parent */
//    struct device *dev;       /* This is this fb device */
    int class_flag;                    /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
    struct fb_tile_ops *tileops;    /* Tile Blitting */
#endif
    char __iomem *screen_base;  /* Virtual address */
    unsigned long screen_size;  /* Amount of ioremapped VRAM or 0 */
    void *pseudo_palette;       /* Fake palette of 16 colors */
#define FBINFO_STATE_RUNNING    0
#define FBINFO_STATE_SUSPENDED  1
    u32 state;          /* Hardware state i.e suspend */
    void *fbcon_par;                /* fbcon use-only private area */
    /* From here on everything is device dependent */
    void *par;
    /* we need the PCI or similar aperture base/size not
       smem_start/size as smem_start may just be an object
       allocated inside the aperture so may not actually overlap */
    resource_size_t aperture_base;
    resource_size_t aperture_size;
};
/* Per-fbdev private state: ties the emulated framebuffer to the radeon
 * device and to the CRTCs it currently scans out on. */
struct radeon_fb_device {
    struct radeon_device        *rdev;
    struct drm_display_mode     *mode;
    struct radeon_framebuffer   *rfb;
    int crtc_count;
    /* crtc currently bound to this */
    uint32_t crtc_ids[2];
};
int radeon_gem_fb_object_create(struct radeon_device *rdev, int size, |
int alignment, int initial_domain, |
bool discardable, bool kernel, |
bool interruptible, |
struct drm_gem_object **obj); |
struct fb_info *framebuffer_alloc(size_t size); |
#if 0 |
/*
 * fbdev setcolreg hook: program one palette / pseudo-palette entry on
 * every CRTC bound to this fbdev instance.
 *
 * Returns 0 on success, 1 when @regno is out of palette range.
 */
static int radeonfb_setcolreg(unsigned regno,
                  unsigned red,
                  unsigned green,
                  unsigned blue,
                  unsigned transp,
                  struct fb_info *info)
{
    struct radeon_fb_device *rfbdev = info->par;
    struct drm_device *dev = rfbdev->rdev->ddev;
    struct drm_crtc *crtc;
    int i;

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_mode_set *modeset = &radeon_crtc->mode_set;
        struct drm_framebuffer *fb = modeset->fb;

        /* Only touch CRTCs that this fbdev instance drives. */
        for (i = 0; i < rfbdev->crtc_count; i++) {
            if (crtc->base.id == rfbdev->crtc_ids[i]) {
                break;
            }
        }
        if (i == rfbdev->crtc_count) {
            continue;
        }

        if (regno > 255) {
            return 1;
        }

        /* 8 bpp: a real hardware palette entry. */
        if (fb->depth == 8) {
            radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno);
            return 0;
        }

        /* True color: pack the 16-bit components into the 16-entry
         * pseudo palette in the fb's native pixel format.
         * NOTE(review): indexing pseudo_palette directly assumes a
         * u32 array member on this port's drm_framebuffer -- confirm
         * against drm_crtc.h. */
        if (regno < 16) {
            switch (fb->depth) {
            case 15:
                fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
                    ((green & 0xf800) >>  6) |
                    ((blue & 0xf800) >> 11);
                break;
            case 16:
                fb->pseudo_palette[regno] = (red & 0xf800) |
                    ((green & 0xfc00) >>  5) |
                    ((blue  & 0xf800) >> 11);
                break;
            case 24:
            case 32:
                fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
                    (green & 0xff00) |
                    ((blue  & 0xff00) >> 8);
                break;
            }
        }
    }
    return 0;
}
/*
 * fbdev .fb_check_var hook: validate a requested video mode against the
 * already-allocated framebuffer object and normalize the RGBA bitfield
 * layout for the effective depth. Rejects modes larger than the current
 * fb (no resize support) and unknown depths with -EINVAL.
 *
 * Currently compiled out (#if 0).
 */
static int radeonfb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct radeon_fb_device *rfbdev = info->par;
struct radeon_framebuffer *rfb = rfbdev->rfb;
struct drm_framebuffer *fb = &rfb->base;
int depth;
/* pixclock of -1 is this driver's "KMS owns the clock" sentinel;
 * 0 is simply invalid */
if (var->pixclock == -1 || !var->pixclock) {
return -EINVAL;
}
/* Need to resize the fb object !!! */
if (var->xres > fb->width || var->yres > fb->height) {
DRM_ERROR("Requested width/height is greater than current fb "
"object %dx%d > %dx%d\n", var->xres, var->yres,
fb->width, fb->height);
DRM_ERROR("Need resizing code.\n");
return -EINVAL;
}
/* derive the effective depth: 16 bpp may be 555 or 565,
 * 32 bpp may be xRGB (24) or ARGB (32) */
switch (var->bits_per_pixel) {
case 16:
depth = (var->green.length == 6) ? 16 : 15;
break;
case 32:
depth = (var->transp.length > 0) ? 32 : 24;
break;
default:
depth = var->bits_per_pixel;
break;
}
/* force the canonical component layout for each depth */
switch (depth) {
case 8:
var->red.offset = 0;
var->green.offset = 0;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 15:
var->red.offset = 10;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 5;
var->blue.length = 5;
var->transp.length = 1;
var->transp.offset = 15;
break;
case 16:
var->red.offset = 11;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 6;
var->blue.length = 5;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 24:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 32:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 8;
var->transp.offset = 24;
break;
default:
return -EINVAL;
}
return 0;
}
#endif |
/* this will let fbcon do the mode init */ |
static int radeonfb_set_par(struct fb_info *info) |
{ |
struct radeon_fb_device *rfbdev = info->par; |
struct drm_device *dev = rfbdev->rdev->ddev; |
struct fb_var_screeninfo *var = &info->var; |
struct drm_crtc *crtc; |
int ret; |
int i; |
if (var->pixclock != -1) { |
DRM_ERROR("PIXEL CLCOK SET\n"); |
return -EINVAL; |
} |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
for (i = 0; i < rfbdev->crtc_count; i++) { |
if (crtc->base.id == rfbdev->crtc_ids[i]) { |
break; |
} |
} |
if (i == rfbdev->crtc_count) { |
continue; |
} |
if (crtc->fb == radeon_crtc->mode_set.fb) { |
// mutex_lock(&dev->mode_config.mutex); |
ret = crtc->funcs->set_config(&radeon_crtc->mode_set); |
// mutex_unlock(&dev->mode_config.mutex); |
if (ret) { |
return ret; |
} |
} |
} |
return 0; |
} |
#if 0 |
/*
 * fbdev .fb_pan_display hook: move the scan-out origin of every CRTC
 * bound to this fbdev to (var->xoffset, var->yoffset) by re-running
 * set_config() with updated x/y. info->var is only updated when the
 * mode set succeeds. Returns the last set_config() result.
 *
 * Currently compiled out (#if 0).
 */
static int radeonfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct radeon_fb_device *rfbdev = info->par;
struct drm_device *dev = rfbdev->rdev->ddev;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
int ret = 0;
int i;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* only touch CRTCs that belong to this fbdev */
for (i = 0; i < rfbdev->crtc_count; i++) {
if (crtc->base.id == rfbdev->crtc_ids[i]) {
break;
}
}
if (i == rfbdev->crtc_count) {
continue;
}
radeon_crtc = to_radeon_crtc(crtc);
modeset = &radeon_crtc->mode_set;
modeset->x = var->xoffset;
modeset->y = var->yoffset;
if (modeset->num_connectors) {
mutex_lock(&dev->mode_config.mutex);
ret = crtc->funcs->set_config(modeset);
mutex_unlock(&dev->mode_config.mutex);
if (!ret) {
/* commit the new origin only on success */
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
return ret;
}
/*
 * Unblank helper: power up (DPMS_ON) every CRTC bound to this fbdev
 * and every encoder attached to those CRTCs.
 *
 * Currently compiled out (#if 0).
 */
static void radeonfb_on(struct fb_info *info)
{
struct radeon_fb_device *rfbdev = info->par;
struct drm_device *dev = rfbdev->rdev->ddev;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
int i;
/*
 * For each CRTC in this fb, turn the CRTC on, then find all
 * associated encoders and turn them on as well.
 * (Comment fixed: this is the power-ON path, not power-off.)
 */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
for (i = 0; i < rfbdev->crtc_count; i++) {
if (crtc->base.id == rfbdev->crtc_ids[i]) {
break;
}
}
/* NOTE(review): unlike radeonfb_off(), the match result `i` is
 * never checked here, so every CRTC is powered on — verify this
 * is intentional. */
mutex_lock(&dev->mode_config.mutex);
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
mutex_unlock(&dev->mode_config.mutex);
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
mutex_lock(&dev->mode_config.mutex);
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
mutex_unlock(&dev->mode_config.mutex);
}
}
}
}
/*
 * Blank helper: put every encoder attached to this fbdev's CRTCs into
 * dpms_mode; the CRTC itself is only powered down for full
 * DRM_MODE_DPMS_OFF (standby/suspend leave the CRTC running).
 *
 * Currently compiled out (#if 0).
 */
static void radeonfb_off(struct fb_info *info, int dpms_mode)
{
struct radeon_fb_device *rfbdev = info->par;
struct drm_device *dev = rfbdev->rdev->ddev;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
int i;
/*
 * For each CRTC in this fb, find all associated encoders
 * and turn them off, then turn off the CRTC.
 */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
/* only touch CRTCs that belong to this fbdev */
for (i = 0; i < rfbdev->crtc_count; i++) {
if (crtc->base.id == rfbdev->crtc_ids[i]) {
break;
}
}
/* NOTE(review): `i` is not compared against crtc_count here, so
 * the filter loop has no effect — confirm against upstream. */
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
mutex_lock(&dev->mode_config.mutex);
encoder_funcs->dpms(encoder, dpms_mode);
mutex_unlock(&dev->mode_config.mutex);
}
}
/* CRTC powers down only for a full OFF */
if (dpms_mode == DRM_MODE_DPMS_OFF) {
mutex_lock(&dev->mode_config.mutex);
crtc_funcs->dpms(crtc, dpms_mode);
mutex_unlock(&dev->mode_config.mutex);
}
}
}
int radeonfb_blank(int blank, struct fb_info *info) |
{ |
switch (blank) { |
case FB_BLANK_UNBLANK: |
radeonfb_on(info); |
break; |
case FB_BLANK_NORMAL: |
radeonfb_off(info, DRM_MODE_DPMS_STANDBY); |
break; |
case FB_BLANK_HSYNC_SUSPEND: |
radeonfb_off(info, DRM_MODE_DPMS_STANDBY); |
break; |
case FB_BLANK_VSYNC_SUSPEND: |
radeonfb_off(info, DRM_MODE_DPMS_SUSPEND); |
break; |
case FB_BLANK_POWERDOWN: |
radeonfb_off(info, DRM_MODE_DPMS_OFF); |
break; |
} |
return 0; |
} |
/* fbdev operation table for the radeon DRM console: mode validation and
 * mode set go through the KMS hooks above; drawing uses the generic
 * cfb_* software renderers. Compiled out together with its hooks. */
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = radeonfb_check_var,
.fb_set_par = radeonfb_set_par,
.fb_setcolreg = radeonfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_pan_display = radeonfb_pan_display,
.fb_blank = radeonfb_blank,
};
/**
 * Currently it is assumed that the old framebuffer is reused.
 *
 * LOCKING
 * caller should hold the mode config lock.
 *
 */
int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
{
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_display_mode *mode = crtc->desired_mode;
/* bail out (returning 1, not a -errno) if the CRTC has no fb,
 * no attached fbdev info, or no desired mode to copy from */
fb = crtc->fb;
if (fb == NULL) {
return 1;
}
info = fb->fbdev;
if (info == NULL) {
return 1;
}
if (mode == NULL) {
return 1;
}
/* translate DRM mode timings into fbdev margins/sync lengths */
info->var.xres = mode->hdisplay;
info->var.right_margin = mode->hsync_start - mode->hdisplay;
info->var.hsync_len = mode->hsync_end - mode->hsync_start;
info->var.left_margin = mode->htotal - mode->hsync_end;
info->var.yres = mode->vdisplay;
info->var.lower_margin = mode->vsync_start - mode->vdisplay;
info->var.vsync_len = mode->vsync_end - mode->vsync_start;
info->var.upper_margin = mode->vtotal - mode->vsync_end;
/* pixclock is in picoseconds-per-pixel; the staged divisions keep
 * the intermediate values inside 32 bits */
info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
/* avoid overflow */
info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
return 0;
}
EXPORT_SYMBOL(radeonfb_resize); |
/* mode_set saved at probe time so a panic can restore the console
 * (registration of the notifier is currently commented out) */
static struct drm_mode_set panic_mode;
/* Panic notifier callback: force the display back to the saved console
 * configuration so the panic message is visible. */
int radeonfb_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
DRM_ERROR("panic occurred, switching back to text console\n");
drm_crtc_helper_set_config(&panic_mode);
return 0;
}
EXPORT_SYMBOL(radeonfb_panic);
/* notifier_block wrapping radeonfb_panic for the panic notifier chain */
static struct notifier_block paniced = {
.notifier_call = radeonfb_panic,
};
#endif |
/*
 * Round a scanline width (in pixels) up to the CRTC pitch alignment.
 * AVIVO-class ASICs need a coarser alignment than older parts; the
 * required alignment also shrinks as bytes-per-pixel grows.
 * Unknown bpp values are returned unchanged (mask stays 0).
 */
static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp)
{
	int is_avivo = ASIC_IS_AVIVO(rdev);
	int bytes = bpp / 8;
	int mask = 0;

	if (bytes == 1)
		mask = is_avivo ? 255 : 127;
	else if (bytes == 2)
		mask = is_avivo ? 127 : 31;
	else if (bytes == 3 || bytes == 4)
		mask = is_avivo ? 63 : 15;

	/* round up to the next multiple of (mask + 1) */
	return (width + mask) & ~mask;
}
/*
 * Allocate and initialise the kernel-console framebuffer:
 * create a VRAM GEM object sized for surface_width x surface_height at
 * 32 bpp, wrap it in a drm_framebuffer, pin it, and fill in a fb_info
 * describing it (fixed layout, variable mode of fb_width x fb_height,
 * RGBA bitfields by depth).
 *
 * On success *rfb_p receives the new radeon_framebuffer and 0 is
 * returned; on failure a -errno is returned and partially-created
 * state is unwound at out_unref/out (several cleanup calls are
 * commented out in this port).
 */
int radeonfb_create(struct radeon_device *rdev,
uint32_t fb_width, uint32_t fb_height,
uint32_t surface_width, uint32_t surface_height,
struct radeon_framebuffer **rfb_p)
{
struct fb_info *info;
struct radeon_fb_device *rfbdev;
struct drm_framebuffer *fb = NULL;
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_object *robj = NULL;
// struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
u64 fb_gpuaddr;
void *fbptr = NULL;
unsigned long tmp;
ENTRY();
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
mode_cmd.bpp = 32;
/* need to align pitch with crtc limits */
/* radeon_align_pitch() works in pixels; (bpp + 1) / 8 converts to
 * bytes per pixel (4 for bpp == 32) */
mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8);
mode_cmd.depth = 32;
size = mode_cmd.pitch * mode_cmd.height;
aligned_size = ALIGN(size, PAGE_SIZE);
/* back the console with a pinned VRAM object */
ret = radeon_gem_fb_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, 0,
false, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
surface_width, surface_height);
ret = -ENOMEM;
goto out;
}
robj = gobj->driver_private;
// mutex_lock(&rdev->ddev->struct_mutex);
fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
if (fb == NULL) {
DRM_ERROR("failed to allocate fb.\n");
ret = -ENOMEM;
goto out_unref;
}
ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
if (ret) {
printk(KERN_ERR "failed to pin framebuffer\n");
ret = -ENOMEM;
goto out_unref;
}
list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
rfb = to_radeon_framebuffer(fb);
*rfb_p = rfb;
rdev->fbdev_rfb = rfb;
rdev->fbdev_robj = robj;
/* fb_info with a radeon_fb_device in its ->par area */
info = framebuffer_alloc(sizeof(struct radeon_fb_device));
if (info == NULL) {
ret = -ENOMEM;
goto out_unref;
}
rfbdev = info->par;
// ret = radeon_object_kmap(robj, &fbptr);
// if (ret) {
// goto out_unref;
// }
/* KolibriOS port: no kmap — map the linear framebuffer directly at
 * the fixed LFB address instead of the commented-out kmap above */
fbptr = (void*)0xFE000000; // LFB_BASE
strcpy(info->fix.id, "radeondrmfb");
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
// info->fix.accel = FB_ACCEL_NONE;
/* NOTE(review): type_aux is assigned twice (also a few lines up) —
 * the duplicate is harmless but could be dropped */
info->fix.type_aux = 0;
// info->flags = FBINFO_DEFAULT;
// info->fbops = &radeonfb_ops;
info->fix.line_length = fb->pitch;
/* physical aperture address = aperture base + offset inside VRAM */
tmp = fb_gpuaddr - rdev->mc.vram_location;
info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = size;
info->screen_base = fbptr;
info->screen_size = size;
info->pseudo_palette = fb->pseudo_palette;
/* virtual resolution is the full surface; visible is fb_width/height */
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
info->var.xoffset = 0;
info->var.yoffset = 0;
// info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;
info->var.xres = fb_width;
info->var.yres = fb_height;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
// info->pixmap.size = 64*1024;
// info->pixmap.buf_align = 8;
// info->pixmap.access_align = 32;
// info->pixmap.flags = FB_PIXMAP_SYSTEM;
// info->pixmap.scan_align = 1;
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)size);
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitch);
/* canonical RGBA bitfield layout per depth (mirrors check_var) */
switch (fb->depth) {
case 8:
info->var.red.offset = 0;
info->var.green.offset = 0;
info->var.blue.offset = 0;
info->var.red.length = 8; /* 8bit DAC */
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
break;
case 15:
info->var.red.offset = 10;
info->var.green.offset = 5;
info->var.blue.offset = 0;
info->var.red.length = 5;
info->var.green.length = 5;
info->var.blue.length = 5;
info->var.transp.offset = 15;
info->var.transp.length = 1;
break;
case 16:
info->var.red.offset = 11;
info->var.green.offset = 5;
info->var.blue.offset = 0;
info->var.red.length = 5;
info->var.green.length = 6;
info->var.blue.length = 5;
info->var.transp.offset = 0;
break;
case 24:
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
break;
case 32:
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 24;
info->var.transp.length = 8;
break;
default:
break;
}
/* NOTE(review): %x with a pointer argument — should be %p (works only
 * because this target is 32-bit) */
dbgprintf("fb = %x\n", fb);
fb->fbdev = info;
rfbdev->rfb = rfb;
rfbdev->rdev = rdev;
// mutex_unlock(&rdev->ddev->struct_mutex);
return 0;
out_unref:
if (robj) {
// radeon_object_kunmap(robj);
}
if (fb && ret) {
list_del(&fb->filp_head);
// drm_gem_object_unreference(gobj);
// drm_framebuffer_cleanup(fb);
kfree(fb);
}
// drm_gem_object_unreference(gobj);
// mutex_unlock(&rdev->ddev->struct_mutex);
out:
return ret;
}
static int radeonfb_single_fb_probe(struct radeon_device *rdev) |
{ |
struct drm_crtc *crtc; |
struct drm_connector *connector; |
unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; |
unsigned int surface_width = 0, surface_height = 0; |
int new_fb = 0; |
int crtc_count = 0; |
int ret, i, conn_count = 0; |
struct radeon_framebuffer *rfb; |
struct fb_info *info; |
struct radeon_fb_device *rfbdev; |
struct drm_mode_set *modeset = NULL; |
ENTRY(); |
/* first up get a count of crtcs now in use and new min/maxes width/heights */ |
list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) { |
if (drm_helper_crtc_in_use(crtc)) { |
if (crtc->desired_mode) { |
if (crtc->desired_mode->hdisplay < fb_width) |
fb_width = crtc->desired_mode->hdisplay; |
if (crtc->desired_mode->vdisplay < fb_height) |
fb_height = crtc->desired_mode->vdisplay; |
if (crtc->desired_mode->hdisplay > surface_width) |
surface_width = crtc->desired_mode->hdisplay; |
if (crtc->desired_mode->vdisplay > surface_height) |
surface_height = crtc->desired_mode->vdisplay; |
} |
crtc_count++; |
} |
} |
if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { |
/* hmm everyone went away - assume VGA cable just fell out |
and will come back later. */ |
dbgprintf("crtc count %x width %x height %x\n", |
crtc_count, fb_width, fb_height); |
return 0; |
} |
/* do we have an fb already? */ |
if (list_empty(&rdev->ddev->mode_config.fb_kernel_list)) { |
/* create an fb if we don't have one */ |
ret = radeonfb_create(rdev, fb_width, fb_height, surface_width, surface_height, &rfb); |
if (ret) { |
return -EINVAL; |
} |
new_fb = 1; |
} else { |
struct drm_framebuffer *fb; |
fb = list_first_entry(&rdev->ddev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head); |
rfb = to_radeon_framebuffer(fb); |
/* if someone hotplugs something bigger than we have already allocated, we are pwned. |
As really we can't resize an fbdev that is in the wild currently due to fbdev |
not really being designed for the lower layers moving stuff around under it. |
- so in the grand style of things - punt. */ |
if ((fb->width < surface_width) || (fb->height < surface_height)) { |
DRM_ERROR("Framebuffer not large enough to scale console onto.\n"); |
return -EINVAL; |
} |
} |
info = rfb->base.fbdev; |
rdev->fbdev_info = info; |
rfbdev = info->par; |
crtc_count = 0; |
/* okay we need to setup new connector sets in the crtcs */ |
list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) { |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
modeset = &radeon_crtc->mode_set; |
modeset->fb = &rfb->base; |
conn_count = 0; |
list_for_each_entry(connector, &rdev->ddev->mode_config.connector_list, head) { |
if (connector->encoder) |
if (connector->encoder->crtc == modeset->crtc) { |
modeset->connectors[conn_count] = connector; |
conn_count++; |
if (conn_count > RADEONFB_CONN_LIMIT) |
BUG(); |
} |
} |
for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++) |
modeset->connectors[i] = NULL; |
rfbdev->crtc_ids[crtc_count++] = crtc->base.id; |
modeset->num_connectors = conn_count; |
if (modeset->crtc->desired_mode) { |
if (modeset->mode) { |
drm_mode_destroy(rdev->ddev, modeset->mode); |
} |
modeset->mode = drm_mode_duplicate(rdev->ddev, |
modeset->crtc->desired_mode); |
} |
} |
rfbdev->crtc_count = crtc_count; |
if (new_fb) { |
info->var.pixclock = -1; |
// if (register_framebuffer(info) < 0) |
// return -EINVAL; |
} else { |
radeonfb_set_par(info); |
} |
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, |
info->fix.id); |
/* Switch back to kernel console on panic */ |
// panic_mode = *modeset; |
// atomic_notifier_chain_register(&panic_notifier_list, &paniced); |
// printk(KERN_INFO "registered panic notifier\n"); |
LEAVE(); |
return 0; |
} |
/*
 * Hotplug entry point: delegate to the single-fb probe strategy.
 * Returns whatever radeonfb_single_fb_probe() returns.
 */
int radeonfb_probe(struct drm_device *dev)
{
int ret;
/* something has changed in the lower levels of hell - deal with it
here */
/* two modes : a) 1 fb to rule all crtcs.
b) one fb per crtc.
two actions 1) new connected device
2) device removed.
case a/1 : if the fb surface isn't big enough - resize the surface fb.
if the fb size isn't big enough - resize fb into surface.
if everything big enough configure the new crtc/etc.
case a/2 : undo the configuration
possibly resize down the fb to fit the new configuration.
case b/1 : see if it is on a new crtc - setup a new fb and add it.
case b/2 : teardown the new fb.
*/
/* only strategy (a) with a single shared fb is implemented here */
ret = radeonfb_single_fb_probe(dev->dev_private);
return ret;
}
EXPORT_SYMBOL(radeonfb_probe);
/*
 * Tear down the fbdev attached to @fb. In this port the actual
 * unregister/unmap/unpin/release calls are disabled (commented out),
 * so this currently only validates the argument and logs.
 * Returns -EINVAL when fb is NULL, 0 otherwise.
 */
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
{
struct fb_info *info;
struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
struct radeon_object *robj;
if (!fb) {
return -EINVAL;
}
info = fb->fbdev;
if (info) {
robj = rfb->obj->driver_private;
// unregister_framebuffer(info);
// radeon_object_kunmap(robj);
// radeon_object_unpin(robj);
// framebuffer_release(info);
}
/* NOTE(review): the message below is printed although the notifier
 * unregistration underneath is commented out */
printk(KERN_INFO "unregistered panic notifier\n");
// atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
// memset(&panic_mode, 0, sizeof(struct drm_mode_set));
return 0;
}
EXPORT_SYMBOL(radeonfb_remove);
/** |
* Allocate a GEM object of the specified size with shmfs backing store |
*/ |
struct drm_gem_object * |
drm_gem_object_alloc(struct drm_device *dev, size_t size) |
{ |
struct drm_gem_object *obj; |
BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
obj->dev = dev; |
// obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
// if (IS_ERR(obj->filp)) { |
// kfree(obj); |
// return NULL; |
// } |
// kref_init(&obj->refcount); |
// kref_init(&obj->handlecount); |
obj->size = size; |
// if (dev->driver->gem_init_object != NULL && |
// dev->driver->gem_init_object(obj) != 0) { |
// fput(obj->filp); |
// kfree(obj); |
// return NULL; |
// } |
// atomic_inc(&dev->object_count); |
// atomic_add(obj->size, &dev->object_memory); |
return obj; |
} |
/*
 * Create the GEM object backing the kernel framebuffer.
 *
 * Unlike radeon_gem_object_create() this does not go through
 * radeon_object_create(): it hand-builds a radeon_object flagged for
 * VRAM with a placeholder 8 MiB drm_mm_node at offset 0.
 * On success *obj receives the new gem object and 0 is returned;
 * on allocation failure -ENOMEM (the gem object itself leaks on the
 * robj path because unreference is disabled in this port — see the
 * commented-out calls).
 *
 * @discardable/@kernel/@interruptible are accepted for signature
 * compatibility with radeon_gem_object_create() but unused here.
 */
int radeon_gem_fb_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				bool interruptible,
				struct drm_gem_object **obj)
{
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	struct drm_mm_node *vm_node;

	*obj = NULL;

	gobj = drm_gem_object_alloc(rdev->ddev, size);
	if (!gobj) {
		return -ENOMEM;
	}
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
	if (!robj) {
		DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
			  size, initial_domain, alignment);
		// mutex_lock(&rdev->ddev->struct_mutex);
		// drm_gem_object_unreference(gobj);
		// mutex_unlock(&rdev->ddev->struct_mutex);
		return -ENOMEM;	/* fix: was "return -ENOMEM;;" */
	}
	robj->rdev = rdev;
	robj->gobj = gobj;
	INIT_LIST_HEAD(&robj->list);
	robj->flags = TTM_PL_FLAG_VRAM;

	/* placeholder VRAM placement: 8 MiB of pages starting at 0 */
	vm_node = kzalloc(sizeof(*vm_node), GFP_KERNEL);
	if (!vm_node) {
		/* fix: old code dereferenced vm_node without checking */
		kfree(robj);
		return -ENOMEM;
	}
	vm_node->free = 0;
	vm_node->size = 0x800000 >> 12;
	vm_node->start = 0;
	vm_node->mm = NULL;

	robj->mm_node = vm_node;
	robj->vm_addr = ((uint32_t)robj->mm_node->start);

	gobj->driver_private = robj;
	*obj = gobj;
	return 0;
}
/*
 * Allocate a zeroed fb_info with @size extra bytes of driver-private
 * storage. When size > 0 the private area starts long-aligned right
 * after the fb_info and is exposed via info->par.
 * Returns NULL on allocation failure.
 */
struct fb_info *framebuffer_alloc(size_t size)
{
	const size_t bytes_per_long = BITS_PER_LONG / 8;
	size_t offset = sizeof(struct fb_info);
	struct fb_info *info;
	char *mem;

	if (size) {
		/* pad so ->par lands on a long boundary */
		offset += bytes_per_long - (sizeof(struct fb_info) % bytes_per_long);
	}

	mem = kzalloc(offset + size, GFP_KERNEL);
	if (!mem)
		return NULL;

	info = (struct fb_info *)mem;
	if (size)
		info->par = mem + offset;

	return info;
}
/drivers/video/drm/radeon/radeon_gem.c |
---|
0,0 → 1,314 |
/* |
* Copyright 2008 Advanced Micro Devices, Inc. |
* Copyright 2008 Red Hat Inc. |
* Copyright 2009 Jerome Glisse. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
* |
* Authors: Dave Airlie |
* Alex Deucher |
* Jerome Glisse |
*/ |
#include "drmP.h" |
#include "drm.h" |
#include "radeon_drm.h" |
#include "radeon.h" |
#define TTM_PL_SYSTEM 0 |
#define TTM_PL_TT 1 |
#define TTM_PL_VRAM 2 |
#define TTM_PL_PRIV0 3 |
#define TTM_PL_PRIV1 4 |
#define TTM_PL_PRIV2 5 |
#define TTM_PL_PRIV3 6 |
#define TTM_PL_PRIV4 7 |
#define TTM_PL_PRIV5 8 |
#define TTM_PL_SWAPPED 15 |
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM) |
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT) |
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM) |
#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0) |
#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1) |
#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2) |
#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3) |
#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4) |
#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5) |
#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED) |
#define TTM_PL_MASK_MEM 0x0000FFFF |
/* GEM init hook: nothing to do for radeon objects; always succeeds. */
int radeon_gem_object_init(struct drm_gem_object *obj)
{
/* we do nothings here */
return 0;
}
/* GEM free hook: detach the radeon_object from the gem object.
 * The actual unref is disabled in this port, so the radeon_object
 * currently leaks here. */
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_object *robj = gobj->driver_private;
gobj->driver_private = NULL;
if (robj) {
// radeon_object_unref(&robj);
}
}
/*
 * Create a GEM object of @size bytes backed by a radeon_object placed
 * in @initial_domain. On success *obj receives the gem object and 0 is
 * returned; on failure the radeon_object_create() error is propagated
 * (the gem object itself is not unreferenced — disabled in this port).
 */
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     bool interruptible,
			     struct drm_gem_object **obj)
{
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int ret;

	*obj = NULL;

	gobj = drm_gem_object_alloc(rdev->ddev, size);
	if (gobj == NULL)
		return -ENOMEM;

	/* never place with less than page alignment */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	ret = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
				   interruptible, &robj);
	if (ret != 0) {
		DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
			  size, initial_domain, alignment);
		// mutex_lock(&rdev->ddev->struct_mutex);
		// drm_gem_object_unreference(gobj);
		// mutex_unlock(&rdev->ddev->struct_mutex);
		return ret;
	}

	gobj->driver_private = robj;
	*obj = gobj;
	return 0;
}
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
uint64_t *gpu_addr) |
{ |
struct radeon_object *robj = obj->driver_private; |
uint32_t flags; |
switch (pin_domain) { |
case RADEON_GEM_DOMAIN_VRAM: |
flags = TTM_PL_FLAG_VRAM; |
break; |
case RADEON_GEM_DOMAIN_GTT: |
flags = TTM_PL_FLAG_TT; |
break; |
default: |
flags = TTM_PL_FLAG_SYSTEM; |
break; |
} |
return radeon_object_pin(robj, flags, gpu_addr); |
} |
/* Unpin a GEM object's backing radeon_object. The actual unpin is
 * disabled in this port, so robj is currently set but unused. */
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
struct radeon_object *robj = obj->driver_private;
// radeon_object_unpin(robj);
}
/*
 * Move/validate a GEM object into a read/write domain.
 * Write domain takes priority over read domain; asking for no domain
 * at all is a no-op with a warning. A CPU-domain request should wait
 * for the object to go idle, but the wait call is disabled in this
 * port. Returns 0 or the (would-be) wait error.
 */
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_object *robj;
	uint32_t domain;
	/* fix: r was uninitialized and then tested below because the
	 * radeon_object_wait() call is commented out — that read was UB */
	int r = 0;

	/* FIXME: reimplement */
	robj = gobj->driver_private;

	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		// r = radeon_object_wait(robj);
		if (r) {
			printk(KERN_ERR "Failed to wait for object !\n");
			return r;
		}
	}
	return 0;
}
/* Initialise the per-device GEM state (empty object list). */
int radeon_gem_init(struct radeon_device *rdev)
{
INIT_LIST_HEAD(&rdev->gem.objects);
return 0;
}
/* Tear down GEM state; forced object deletion is disabled in this port,
 * so this is currently a no-op. */
void radeon_gem_fini(struct radeon_device *rdev)
{
// radeon_object_force_delete(rdev);
}
#if 0 |
/* |
* GEM ioctls. |
*/ |
/* GEM_INFO ioctl: report VRAM/GTT sizes to userspace.
 * Compiled out (#if 0). */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
args->vram_size = rdev->mc.vram_size;
/* FIXME: report something that makes sense — for now assume 4 MiB
 * of VRAM is reserved and the rest is CPU-visible */
args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024);
args->gart_size = rdev->mc.gtt_size;
return 0;
}
/* GEM_PREAD ioctl: not implemented; always -ENOSYS.
 * Compiled out (#if 0). */
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
/* TODO: implement */
DRM_ERROR("unimplemented %s\n", __func__);
return -ENOSYS;
}
/* GEM_PWRITE ioctl: not implemented; always -ENOSYS.
 * Compiled out (#if 0). */
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
/* TODO: implement */
DRM_ERROR("unimplemented %s\n", __func__);
return -ENOSYS;
}
/*
 * GEM_CREATE ioctl: allocate a GEM object of the (page-rounded)
 * requested size in the requested domain and hand the caller a handle.
 * The object is left owned solely by the handle (the extra references
 * from creation are dropped). Compiled out (#if 0).
 */
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_create *args = data;
struct drm_gem_object *gobj;
uint32_t handle;
int r;
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
args->initial_domain, false,
false, true, &gobj);
if (r) {
return r;
}
r = drm_gem_handle_create(filp, gobj, &handle);
if (r) {
/* handle creation failed: drop our creation reference */
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
return r;
}
/* drop the handle-creation reference; the handle keeps it alive */
mutex_lock(&dev->struct_mutex);
drm_gem_object_handle_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
args->handle = handle;
return 0;
}
/*
 * GEM_SET_DOMAIN ioctl: validate the object into the requested
 * read/write domain (currently just a BO wait for CPU access via
 * radeon_gem_set_domain). Compiled out (#if 0).
 */
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
/* transition the BO to a domain -
* just validate the BO into a certain domain */
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
struct radeon_object *robj;
int r;
/* for now if someone requests domain CPU -
* just make sure the buffer is finished with */
/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
/* drop the lookup reference */
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
return r;
}
/*
 * GEM_MMAP ioctl: map the object's backing store and return the user
 * address through args->addr_ptr. Compiled out (#if 0).
 */
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
struct radeon_object *robj;
int r;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
r = radeon_object_mmap(robj, &args->addr_ptr);
/* drop the lookup reference */
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
return r;
}
/* GEM_BUSY ioctl: stub — always reports "not busy" (0).
 * Compiled out (#if 0). */
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
/* FIXME: implement */
return 0;
}
/*
 * GEM_WAIT_IDLE ioctl: block until the object's backing store is idle.
 * Compiled out (#if 0).
 */
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_object *robj;
int r;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
r = radeon_object_wait(robj);
/* drop the lookup reference */
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
return r;
}
#endif |
/drivers/video/drm/radeon/radeon_object.c |
---|
35,322 → 35,14 |
#include "radeon_drm.h" |
#include "radeon.h" |
#include <drm_mm.h> |
#include "radeon_object.h" |
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
int pages, u32_t *pagelist); |
#define TTM_PL_SYSTEM 0 |
#define TTM_PL_TT 1 |
#define TTM_PL_VRAM 2 |
#define TTM_PL_PRIV0 3 |
#define TTM_PL_PRIV1 4 |
#define TTM_PL_PRIV2 5 |
#define TTM_PL_PRIV3 6 |
#define TTM_PL_PRIV4 7 |
#define TTM_PL_PRIV5 8 |
#define TTM_PL_SWAPPED 15 |
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM) |
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT) |
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM) |
#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0) |
#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1) |
#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2) |
#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3) |
#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4) |
#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5) |
#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED) |
#define TTM_PL_MASK_MEM 0x0000FFFF |
/* Per-memory-type (system/TT/VRAM/...) manager state for the cut-down
 * TTM layer carried by this port. */
struct ttm_mem_type_manager {
/*
* No protection. Constant from start.
*/
bool has_type;
bool use_type;
uint32_t flags;
/* base of this memory type in the GPU address space */
unsigned long gpu_offset;
/* CPU-visible aperture window for this type */
unsigned long io_offset;
unsigned long io_size;
void *io_addr;
/* total managed size, in units defined by the manager */
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
/*
* Protected by the bdev->lru_lock.
* TODO: Consider one lru_lock per ttm_mem_type_manager.
* Plays ill with list removal, though.
*/
struct drm_mm manager;
struct list_head lru;
};
/* Driver hook table for a TTM buffer-object device. Most hooks are |
 * commented out in this port; only the priority arrays remain live. |
 */ |
struct ttm_bo_driver { |
const uint32_t *mem_type_prio; |
const uint32_t *mem_busy_prio; |
uint32_t num_mem_type_prio; |
uint32_t num_mem_busy_prio; |
/** |
* struct ttm_bo_driver member create_ttm_backend_entry |
* |
* @bdev: The buffer object device. |
* |
* Create a driver specific struct ttm_backend. |
*/ |
// struct ttm_backend *(*create_ttm_backend_entry)(struct ttm_bo_device *bdev); |
/** |
* struct ttm_bo_driver member invalidate_caches |
* |
* @bdev: the buffer object device. |
* @flags: new placement of the rebound buffer object. |
* |
* A previously evicted buffer has been rebound in a |
* potentially new location. Tell the driver that it might |
* consider invalidating read (texture) caches on the next command |
* submission as a consequence. |
*/ |
// int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags); |
// int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type, |
// struct ttm_mem_type_manager *man); |
/** |
* struct ttm_bo_driver member evict_flags: |
* |
* @bo: the buffer object to be evicted |
* |
* Return the bo flags for a buffer which is not mapped to the hardware. |
* These will be placed in proposed_flags so that when the move is |
* finished, they'll end up in bo->mem.flags |
*/ |
// uint32_t(*evict_flags) (struct ttm_buffer_object *bo); |
/** |
* struct ttm_bo_driver member move: |
* |
* @bo: the buffer to move |
* @evict: whether this motion is evicting the buffer from |
* the graphics address space |
* @interruptible: Use interruptible sleeps if possible when sleeping. |
* @no_wait: whether this should give up and return -EBUSY |
* if this move would require sleeping |
* @new_mem: the new memory region receiving the buffer |
* |
* Move a buffer between two memory regions. |
*/ |
// int (*move) (struct ttm_buffer_object *bo, |
// bool evict, bool interruptible, |
// bool no_wait, struct ttm_mem_reg *new_mem); |
/** |
* struct ttm_bo_driver_member verify_access |
* |
* @bo: Pointer to a buffer object. |
* @filp: Pointer to a struct file trying to access the object. |
* |
* Called from the map / write / read methods to verify that the |
* caller is permitted to access the buffer object. |
* This member may be set to NULL, which will refuse this kind of |
* access for all buffer objects. |
* This function should return 0 if access is granted, -EPERM otherwise. |
*/ |
// int (*verify_access) (struct ttm_buffer_object *bo, |
// struct file *filp); |
/** |
* In case a driver writer dislikes the TTM fence objects, |
* the driver writer can replace those with sync objects of |
* his / her own. If it turns out that no driver writer is |
* using these, I suggest we remove these hooks and plug in |
* fences directly. The bo driver needs the following functionality: |
* See the corresponding functions in the fence object API |
* documentation. |
*/ |
// bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg); |
// int (*sync_obj_wait) (void *sync_obj, void *sync_arg, |
// bool lazy, bool interruptible); |
// int (*sync_obj_flush) (void *sync_obj, void *sync_arg); |
// void (*sync_obj_unref) (void **sync_obj); |
// void *(*sync_obj_ref) (void *sync_obj); |
}; |
/* Number of slots in ttm_bo_device::man[] (indexed by TTM_PL_*). */ |
#define TTM_NUM_MEM_TYPES 8 |
/* Per-device TTM state: one memory-type manager per placement plus |
 * the address-space allocator and LRU/destroy lists. |
 */ |
struct ttm_bo_device { |
/* |
* Constant after bo device init / atomic. |
*/ |
// struct ttm_mem_global *mem_glob; |
struct ttm_bo_driver *driver; |
// struct page *dummy_read_page; |
// struct ttm_mem_shrink shrink; |
size_t ttm_bo_extra_size; |
size_t ttm_bo_size; |
// rwlock_t vm_lock; |
/* |
* Protected by the vm lock. |
*/ |
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; |
// struct rb_root addr_space_rb; |
struct drm_mm addr_space_mm; |
/* |
* Might want to change this to one lock per manager. |
*/ |
// spinlock_t lru_lock; |
/* |
* Protected by the lru lock. |
*/ |
struct list_head ddestroy; |
struct list_head swap_lru; |
/* |
* Protected by load / firstopen / lastclose / unload sync. |
*/ |
bool nice_mode; |
// struct address_space *dev_mapping; |
/* |
* Internal protection. |
*/ |
// struct delayed_work wq; |
}; |
/* Describes where a buffer object currently lives (mm node + placement). */ |
struct ttm_mem_reg { |
struct drm_mm_node *mm_node; |
unsigned long size; |
unsigned long num_pages; |
uint32_t page_alignment; |
uint32_t mem_type; |
uint32_t placement; |
}; |
/* Origin of a buffer object: kernel-internal, user-visible, or device. */ |
enum ttm_bo_type { |
ttm_bo_type_device, |
ttm_bo_type_user, |
ttm_bo_type_kernel |
}; |
/* Core TTM buffer object; lock-based fields from upstream are commented |
 * out in this port, so the locking notes below describe the original |
 * design rather than what this copy actually enforces. |
 */ |
struct ttm_buffer_object { |
/** |
* Members constant at init. |
*/ |
struct ttm_bo_device *bdev; |
unsigned long buffer_start; |
enum ttm_bo_type type; |
void (*destroy) (struct ttm_buffer_object *); |
unsigned long num_pages; |
uint64_t addr_space_offset; |
size_t acc_size; |
/** |
* Members not needing protection. |
*/ |
// struct kref kref; |
// struct kref list_kref; |
// wait_queue_head_t event_queue; |
// spinlock_t lock; |
/** |
* Members protected by the bo::reserved lock. |
*/ |
uint32_t proposed_placement; |
struct ttm_mem_reg mem; |
// struct file *persistant_swap_storage; |
// struct ttm_tt *ttm; |
bool evicted; |
/** |
* Members protected by the bo::reserved lock only when written to. |
*/ |
// atomic_t cpu_writers; |
/** |
* Members protected by the bdev::lru_lock. |
*/ |
struct list_head lru; |
struct list_head ddestroy; |
struct list_head swap; |
uint32_t val_seq; |
bool seq_valid; |
/** |
* Members protected by the bdev::lru_lock |
* only when written to. |
*/ |
// atomic_t reserved; |
/** |
* Members protected by the bo::lock |
*/ |
void *sync_obj_arg; |
void *sync_obj; |
unsigned long priv_flags; |
/** |
* Members protected by the bdev::vm_lock |
*/ |
// struct rb_node vm_rb; |
struct drm_mm_node *vm_node; |
/** |
* Special members that are protected by the reserve lock |
* and the bo::lock when written to. Can be read with |
* either of these locks held. |
*/ |
unsigned long offset; |
uint32_t cur_placement; |
}; |
/* Radeon wrapper around a TTM buffer object, extended with the port's |
 * own mapping bookkeeping (mm_node / vm_addr / cpu_addr / flags). |
 * NOTE(review): the copy of this struct in radeon_object.h declares |
 * gobj un-commented — confirm which definition is current before use. |
 */ |
struct radeon_object |
{ |
struct ttm_buffer_object tobj; |
struct list_head list; |
struct radeon_device *rdev; |
// struct drm_gem_object *gobj; |
// struct ttm_bo_kmap_obj kmap; |
unsigned pin_count; |
uint64_t gpu_addr; |
void *kptr; |
bool is_iomem; |
struct drm_mm_node *mm_node; |
u32_t vm_addr; |
u32_t cpu_addr; |
u32_t flags; |
}; |
/* File-local range allocators — presumably one for GTT and one for |
 * VRAM, judging by the names; verify against their initialisation. |
 */ |
static struct drm_mm mm_gtt; |
static struct drm_mm mm_vram; |
/drivers/video/drm/radeon/radeon_object.h |
---|
0,0 → 1,355 |
/* |
* Copyright 2008 Advanced Micro Devices, Inc. |
* Copyright 2008 Red Hat Inc. |
* Copyright 2009 Jerome Glisse. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
* |
* Authors: Dave Airlie |
* Alex Deucher |
* Jerome Glisse |
*/ |
#ifndef __RADEON_OBJECT_H__ |
#define __RADEON_OBJECT_H__ |
//#include <ttm/ttm_bo_api.h> |
//#include <ttm/ttm_bo_driver.h> |
//#include <ttm/ttm_placement.h> |
//#include <ttm/ttm_module.h> |
/* |
* TTM. |
*/ |
//struct radeon_mman { |
// struct ttm_global_reference mem_global_ref; |
// bool mem_global_referenced; |
// struct ttm_bo_device bdev; |
//}; |
/* NOTE(review): these TTM placement indices/flags duplicate the copy in |
 * radeon_object.c — keep both in sync (better: include this header there). |
 */ |
#define TTM_PL_SYSTEM 0 |
#define TTM_PL_TT 1 |
#define TTM_PL_VRAM 2 |
#define TTM_PL_PRIV0 3 |
#define TTM_PL_PRIV1 4 |
#define TTM_PL_PRIV2 5 |
#define TTM_PL_PRIV3 6 |
#define TTM_PL_PRIV4 7 |
#define TTM_PL_PRIV5 8 |
#define TTM_PL_SWAPPED 15 |
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM) |
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT) |
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM) |
#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0) |
#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1) |
#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2) |
#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3) |
#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4) |
#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5) |
#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED) |
/* Low 16 bits of a placement word select the memory type. */ |
#define TTM_PL_MASK_MEM 0x0000FFFF |
/* Per-placement (e.g. VRAM / TT / system) memory-manager state. */ |
struct ttm_mem_type_manager { |
/* |
* No protection. Constant from start. |
*/ |
bool has_type; |
bool use_type; |
uint32_t flags; |
unsigned long gpu_offset; |
unsigned long io_offset; |
unsigned long io_size; |
void *io_addr; |
uint64_t size; |
uint32_t available_caching; |
uint32_t default_caching; |
/* |
* Protected by the bdev->lru_lock. |
* TODO: Consider one lru_lock per ttm_mem_type_manager. |
* Plays ill with list removal, though. |
*/ |
struct drm_mm manager; |
struct list_head lru; |
}; |
/* Driver hook table for a TTM buffer-object device. Most hooks are |
 * commented out in this port; only the priority arrays remain live. |
 */ |
struct ttm_bo_driver { |
const uint32_t *mem_type_prio; |
const uint32_t *mem_busy_prio; |
uint32_t num_mem_type_prio; |
uint32_t num_mem_busy_prio; |
/** |
* struct ttm_bo_driver member create_ttm_backend_entry |
* |
* @bdev: The buffer object device. |
* |
* Create a driver specific struct ttm_backend. |
*/ |
// struct ttm_backend *(*create_ttm_backend_entry)(struct ttm_bo_device *bdev); |
/** |
* struct ttm_bo_driver member invalidate_caches |
* |
* @bdev: the buffer object device. |
* @flags: new placement of the rebound buffer object. |
* |
* A previously evicted buffer has been rebound in a |
* potentially new location. Tell the driver that it might |
* consider invalidating read (texture) caches on the next command |
* submission as a consequence. |
*/ |
// int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags); |
// int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type, |
// struct ttm_mem_type_manager *man); |
/** |
* struct ttm_bo_driver member evict_flags: |
* |
* @bo: the buffer object to be evicted |
* |
* Return the bo flags for a buffer which is not mapped to the hardware. |
* These will be placed in proposed_flags so that when the move is |
* finished, they'll end up in bo->mem.flags |
*/ |
// uint32_t(*evict_flags) (struct ttm_buffer_object *bo); |
/** |
* struct ttm_bo_driver member move: |
* |
* @bo: the buffer to move |
* @evict: whether this motion is evicting the buffer from |
* the graphics address space |
* @interruptible: Use interruptible sleeps if possible when sleeping. |
* @no_wait: whether this should give up and return -EBUSY |
* if this move would require sleeping |
* @new_mem: the new memory region receiving the buffer |
* |
* Move a buffer between two memory regions. |
*/ |
// int (*move) (struct ttm_buffer_object *bo, |
// bool evict, bool interruptible, |
// bool no_wait, struct ttm_mem_reg *new_mem); |
/** |
* struct ttm_bo_driver_member verify_access |
* |
* @bo: Pointer to a buffer object. |
* @filp: Pointer to a struct file trying to access the object. |
* |
* Called from the map / write / read methods to verify that the |
* caller is permitted to access the buffer object. |
* This member may be set to NULL, which will refuse this kind of |
* access for all buffer objects. |
* This function should return 0 if access is granted, -EPERM otherwise. |
*/ |
// int (*verify_access) (struct ttm_buffer_object *bo, |
// struct file *filp); |
/** |
* In case a driver writer dislikes the TTM fence objects, |
* the driver writer can replace those with sync objects of |
* his / her own. If it turns out that no driver writer is |
* using these, I suggest we remove these hooks and plug in |
* fences directly. The bo driver needs the following functionality: |
* See the corresponding functions in the fence object API |
* documentation. |
*/ |
// bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg); |
// int (*sync_obj_wait) (void *sync_obj, void *sync_arg, |
// bool lazy, bool interruptible); |
// int (*sync_obj_flush) (void *sync_obj, void *sync_arg); |
// void (*sync_obj_unref) (void **sync_obj); |
// void *(*sync_obj_ref) (void *sync_obj); |
}; |
/* Number of slots in ttm_bo_device::man[] (indexed by TTM_PL_*). */ |
#define TTM_NUM_MEM_TYPES 8 |
/* Per-device TTM state: one memory-type manager per placement plus |
 * the address-space allocator and LRU/destroy lists. |
 */ |
struct ttm_bo_device { |
/* |
* Constant after bo device init / atomic. |
*/ |
// struct ttm_mem_global *mem_glob; |
struct ttm_bo_driver *driver; |
// struct page *dummy_read_page; |
// struct ttm_mem_shrink shrink; |
size_t ttm_bo_extra_size; |
size_t ttm_bo_size; |
// rwlock_t vm_lock; |
/* |
* Protected by the vm lock. |
*/ |
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; |
// struct rb_root addr_space_rb; |
struct drm_mm addr_space_mm; |
/* |
* Might want to change this to one lock per manager. |
*/ |
// spinlock_t lru_lock; |
/* |
* Protected by the lru lock. |
*/ |
struct list_head ddestroy; |
struct list_head swap_lru; |
/* |
* Protected by load / firstopen / lastclose / unload sync. |
*/ |
bool nice_mode; |
// struct address_space *dev_mapping; |
/* |
* Internal protection. |
*/ |
// struct delayed_work wq; |
}; |
/* Describes where a buffer object currently lives (mm node + placement). */ |
struct ttm_mem_reg { |
struct drm_mm_node *mm_node; |
unsigned long size; |
unsigned long num_pages; |
uint32_t page_alignment; |
uint32_t mem_type; |
uint32_t placement; |
}; |
/* Origin of a buffer object: kernel-internal, user-visible, or device. */ |
enum ttm_bo_type { |
ttm_bo_type_device, |
ttm_bo_type_user, |
ttm_bo_type_kernel |
}; |
/* Core TTM buffer object; lock-based fields from upstream are commented |
 * out in this port, so the locking notes below describe the original |
 * design rather than what this copy actually enforces. |
 */ |
struct ttm_buffer_object { |
/** |
* Members constant at init. |
*/ |
struct ttm_bo_device *bdev; |
unsigned long buffer_start; |
enum ttm_bo_type type; |
void (*destroy) (struct ttm_buffer_object *); |
unsigned long num_pages; |
uint64_t addr_space_offset; |
size_t acc_size; |
/** |
* Members not needing protection. |
*/ |
// struct kref kref; |
// struct kref list_kref; |
// wait_queue_head_t event_queue; |
// spinlock_t lock; |
/** |
* Members protected by the bo::reserved lock. |
*/ |
uint32_t proposed_placement; |
struct ttm_mem_reg mem; |
// struct file *persistant_swap_storage; |
// struct ttm_tt *ttm; |
bool evicted; |
/** |
* Members protected by the bo::reserved lock only when written to. |
*/ |
// atomic_t cpu_writers; |
/** |
* Members protected by the bdev::lru_lock. |
*/ |
struct list_head lru; |
struct list_head ddestroy; |
struct list_head swap; |
uint32_t val_seq; |
bool seq_valid; |
/** |
* Members protected by the bdev::lru_lock |
* only when written to. |
*/ |
// atomic_t reserved; |
/** |
* Members protected by the bo::lock |
*/ |
void *sync_obj_arg; |
void *sync_obj; |
unsigned long priv_flags; |
/** |
* Members protected by the bdev::vm_lock |
*/ |
// struct rb_node vm_rb; |
struct drm_mm_node *vm_node; |
/** |
* Special members that are protected by the reserve lock |
* and the bo::lock when written to. Can be read with |
* either of these locks held. |
*/ |
unsigned long offset; |
uint32_t cur_placement; |
}; |
/* Radeon wrapper around a TTM buffer object, extended with the port's |
 * own mapping bookkeeping (mm_node / vm_addr / cpu_addr / flags). |
 * NOTE(review): the copy of this struct in radeon_object.c has gobj |
 * commented out — confirm which definition is current before use. |
 */ |
struct radeon_object |
{ |
struct ttm_buffer_object tobj; |
struct list_head list; |
struct radeon_device *rdev; |
struct drm_gem_object *gobj; |
// struct ttm_bo_kmap_obj kmap; |
unsigned pin_count; |
uint64_t gpu_addr; |
void *kptr; |
bool is_iomem; |
struct drm_mm_node *mm_node; |
u32_t vm_addr; |
u32_t cpu_addr; |
u32_t flags; |
}; |
#endif |